// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! MemoryMapper trait and basic impl for virtio-iommu implementation
//!
//! All the addr/range ends in this file are exclusive.

use std::any::Any;
use std::collections::BTreeMap;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::warn;
use base::AsRawDescriptors;
use base::Event;
use base::Protection;
use base::RawDescriptor;
use cros_async::EventAsync;
use cros_async::Executor;
use resources::AddressRange;
use serde::Deserialize;
use serde::Serialize;
use vm_memory::GuestAddress;

#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct MemRegion {
    pub gpa: GuestAddress,
    pub len: u64,
    pub prot: Protection,
}

/// A single mapping from the guest IO virtual address space to the guest physical address space.
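///
/// Construction validates the range: a zero-sized mapping, or one whose iova or
/// gpa range would overflow, is rejected. A minimal sketch (hypothetical values;
/// not a runnable doctest):
///
/// ```ignore
/// let map = MappingInfo::new(0, GuestAddress(0x1000), 0x1000, Protection::read_write())?;
/// assert!(MappingInfo::new(u64::MAX, GuestAddress(0), 1, Protection::read()).is_err());
/// ```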
#[derive(Debug)]
pub struct MappingInfo {
    pub iova: u64,
    pub gpa: GuestAddress,
    pub size: u64,
    pub prot: Protection,
}

impl MappingInfo {
    #[allow(dead_code)]
    fn new(iova: u64, gpa: GuestAddress, size: u64, prot: Protection) -> Result<Self> {
        if size == 0 {
            bail!("can't create 0 sized region");
        }
        iova.checked_add(size).context("iova overflow")?;
        gpa.checked_add(size).context("gpa overflow")?;
        Ok(Self {
            iova,
            gpa,
            size,
            prot,
        })
    }
}

struct ExportState {
    // List of exported regions. Exported regions can overlap.
    exported: Vec<AddressRange>,

    // Event used to signal the client device when there is a fault.
    fault_event: Event,

    // Event used to signal virtio-iommu when the fault is resolved.
    fault_resolved_event_internal: Event,
    // Clone of the above event returned to virtio-iommu when a fault occurs.
    fault_resolved_event_external: Option<EventAsync>,
}

impl ExportState {
    fn new(ex: &Executor) -> Result<(Self, Event)> {
        let fault_event = Event::new().context("failed to create fault_event")?;
        let fault_resolved_event = Event::new().context("failed to create resolve event")?;

        Ok((
            Self {
                exported: Vec::new(),
                fault_event: fault_event
                    .try_clone()
                    .context("failed to clone fault event")?,
                fault_resolved_event_internal: fault_resolved_event
                    .try_clone()
                    .context("failed to clone resolve event")?,
                fault_resolved_event_external: Some(
                    EventAsync::new(fault_resolved_event, ex)
                        .context("failed to create async resolve event")?,
                ),
            },
            fault_event,
        ))
    }

    fn on_fault(&mut self) -> Option<EventAsync> {
        let ret = self.fault_resolved_event_external.take();
        if ret.is_some() {
            self.fault_event.signal().expect("failed to signal fault");
        }
        ret
    }

    fn can_export(&self) -> bool {
        self.fault_resolved_event_external.is_some()
    }
}

/// A basic IOMMU. It is designed as a building block for virtio-iommu.
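///
/// A minimal usage sketch (hypothetical addresses; not a runnable doctest):
///
/// ```ignore
/// let mut mapper = BasicMemoryMapper::new(u64::MAX);
/// let map = MappingInfo::new(0, GuestAddress(0x1000), 0x1000, Protection::read_write())?;
/// assert_eq!(mapper.add_map(map)?, AddMapResult::Ok);
/// mapper.remove_map(0, 0x1000)?;
/// ```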
pub struct BasicMemoryMapper {
    maps: BTreeMap<u64, MappingInfo>, // key = MappingInfo.iova
    mask: u64,
    id: u32,
    export_state: Option<ExportState>,
}

pub enum RemoveMapResult {
    // The removal was successful. If the event is Some, it must be waited on before
    // informing the guest that the unmapping completed.
    Success(Option<EventAsync>),
    // The removal failed because the range partially overlapped a mapping.
    OverlapFailure,
}

#[derive(PartialEq, Eq, Debug)]
pub enum AddMapResult {
    Ok,
    OverlapFailure,
}

/// A generic interface for vfio and other iommu backends
///
/// This interface includes APIs to support clients within crosvm (e.g. the VVU
/// proxy) which are configured to sit behind a virtio-iommu device, allowing
/// them to access memory via IO virtual address (IOVA). This is done by
/// exporting mapped memory to the client. The virtio-iommu device can manage
/// many mappers simultaneously. The current implementation has a 1-to-1
/// relationship between mappers and clients, although this may be extended to
/// 1-to-N to fully support the virtio-iommu API.
///
/// Clients must only access memory while it is mapped into the virtio-iommu device.
/// As such, this interface has a concept of an "IOMMU fault". An IOMMU fault is
/// triggered when the guest removes a mapping that includes memory that is exported
/// but not yet released. This includes the case where |reset_domain| is called while
/// any memory is exported. When an IOMMU fault occurs, the event returned by
/// |start_export_session| is signaled, and the client must immediately release any
/// exported memory.
///
/// From the virtio-iommu device's perspective, if |remove_map| or |reset_domain|
/// triggers an IOMMU fault, then an eventfd is returned. The device must wait on
/// that event until all exported regions have been released, at which point it can
/// complete the virtio request that triggered the fault.
///
/// As such, the flow of a fault is:
///  1) The guest sends a virtio-iommu message that triggers a fault. Faults can be triggered by
///     unmap or detach messages, or by attach messages if such messages are re-attaching an
///     endpoint to a new domain. One example of a guest event that can trigger such a message is a
///     userspace VVU device process crashing and triggering the guest kernel to re-attach the VVU
///     device to the null endpoint.
///  2) The viommu device removes an exported mapping from the mapper.
///  3) The mapper signals the IOMMU fault eventfd and returns the fault resolution event to the
///     viommu device.
///  4) The viommu device starts waiting on the fault resolution event. Note that although the
///     viommu device and mapper are both running on the same executor, this wait is async. This
///     means that although further processing of virtio-iommu requests is paused, the mapper
///     continues to run.
///  5) The client receives the IOMMU fault.
///  6) The client releases all exported regions.
///  7) Once the mapper receives the final release message from the client, it signals the fault
///     resolution event that the viommu device is waiting on.
///  8) The viommu device finishes processing the original virtio-iommu request and sends a reply
///     to the guest.
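///
/// A minimal sketch of the viommu device's side of this flow, assuming
/// hypothetical `mapper` and `reply_to_guest` values (not a runnable doctest):
///
/// ```ignore
/// match mapper.remove_map(iova_start, size)? {
///     RemoveMapResult::Success(Some(fault_resolved)) => {
///         // A fault was triggered: the client has been signaled and must now
///         // release its exported regions. Wait (async) before replying.
///         fault_resolved.next_val().await?;
///         reply_to_guest();
///     }
///     RemoveMapResult::Success(None) => reply_to_guest(),
///     RemoveMapResult::OverlapFailure => { /* reply to the guest with an error */ }
/// }
/// ```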
pub trait MemoryMapper: Send {
    /// Creates a new mapping. If the new mapping overlaps with an existing
    /// mapping, returns Ok(AddMapResult::OverlapFailure).
    fn add_map(&mut self, new_map: MappingInfo) -> Result<AddMapResult>;

    /// Removes all mappings within the specified range. If the range partially
    /// overlaps a mapping, returns Ok(RemoveMapResult::OverlapFailure).
    fn remove_map(&mut self, iova_start: u64, size: u64) -> Result<RemoveMapResult>;

    /// Gets the mask of the input (IOVA) address space.
    fn get_mask(&self) -> Result<u64>;

    /// Whether or not endpoints can be safely detached from this mapper.
    fn supports_detach(&self) -> bool;
    /// Resets the mapper's domain back into its initial state. Only necessary
    /// if |supports_detach| returns true.
    fn reset_domain(&mut self) -> Option<EventAsync> {
        None
    }

    /// Gets an identifier for the MemoryMapper instance. Must be unique among
    /// instances of the same trait implementation.
    fn id(&self) -> u32;

    /// Starts an export session with the mapper.
    ///
    /// Returns an event which is signaled if exported memory is unmapped (i.e. if
    /// a fault occurs). Once a fault occurs, no new regions may be exported for
    /// that session. The client must watch for this event and immediately release
    /// all exported regions.
    ///
    /// Only one session can be active at a time. A new session can only be created if
    /// the previous session has no remaining exported regions.
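    ///
    /// A minimal sketch of a client-side session, assuming hypothetical `mapper`,
    /// `ex`, `iova`, and `size` values (not a runnable doctest):
    ///
    /// ```ignore
    /// let fault_event = mapper.start_export_session(&ex)?;
    /// let regions = mapper.export(iova, size)?;
    /// // ... access the memory described by `regions` ...
    /// mapper.release(iova, size)?;
    /// // If `fault_event` is ever signaled, release all exported regions
    /// // immediately instead of continuing to use them.
    /// ```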
    fn start_export_session(&mut self, _ex: &Executor) -> Result<Event> {
        bail!("not supported");
    }

    /// Maps the specified host virtual address region at the given IOVA.
    ///
    /// # Safety
    ///
    /// The memory in the region specified by hva and size must be
    /// memory external to Rust.
    unsafe fn vfio_dma_map(
        &mut self,
        _iova: u64,
        _hva: u64,
        _size: u64,
        _prot: Protection,
    ) -> Result<AddMapResult> {
        bail!("not supported");
    }

    /// Exports the specified IO region.
    ///
    /// Multiple MemRegions are returned when the gpa is discontiguous or the
    /// permissions differ across the range.
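    ///
    /// A minimal sketch, assuming a hypothetical `mapper` with two adjacent IOVA
    /// mappings whose GPAs are not contiguous (not a runnable doctest):
    ///
    /// ```ignore
    /// // IOVA [0, 4) -> GPA [0x3000, 0x3004) and IOVA [4, 8) -> GPA [0x1000, 0x1004)
    /// let regions = mapper.export(0, 8)?;
    /// // One MemRegion per contiguous run of GPAs with identical protection.
    /// assert_eq!(regions.len(), 2);
    /// ```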
    fn export(&mut self, _iova: u64, _size: u64) -> Result<Vec<MemRegion>> {
        bail!("not supported");
    }

    /// Releases a previously exported region.
    ///
    /// If a given IO region is exported multiple times, it must be released multiple times.
    fn release(&mut self, _iova: u64, _size: u64) -> Result<()> {
        bail!("not supported");
    }
}

pub trait MemoryMapperTrait: MemoryMapper + AsRawDescriptors + Any {}
impl<T: MemoryMapper + AsRawDescriptors + Any> MemoryMapperTrait for T {}

impl BasicMemoryMapper {
    pub fn new(mask: u64) -> BasicMemoryMapper {
        static NEXT_ID: AtomicU32 = AtomicU32::new(0);
        BasicMemoryMapper {
            maps: BTreeMap::new(),
            mask,
            id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            export_state: None,
        }
    }

    #[cfg(test)]
    pub fn len(&self) -> usize {
        self.maps.len()
    }

    #[cfg(test)]
    pub fn is_empty(&self) -> bool {
        self.maps.is_empty()
    }
}

impl MemoryMapper for BasicMemoryMapper {
    fn add_map(&mut self, new_map: MappingInfo) -> Result<AddMapResult> {
        if new_map.size == 0 {
            bail!("can't map 0 sized region");
        }
        let new_iova_end = new_map
            .iova
            .checked_add(new_map.size)
            .context("iova overflow")?;
        new_map
            .gpa
            .checked_add(new_map.size)
            .context("gpa overflow")?;
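        // Overlap check: existing mappings never overlap each other and the BTreeMap
        // is keyed by start iova, so the only mapping that could overlap
        // [new_map.iova, new_iova_end) is the one with the largest start iova below
        // new_iova_end.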
        let mut iter = self.maps.range(..new_iova_end);
        if let Some((_, map)) = iter.next_back() {
            if map.iova + map.size > new_map.iova {
                return Ok(AddMapResult::OverlapFailure);
            }
        }
        self.maps.insert(new_map.iova, new_map);
        Ok(AddMapResult::Ok)
    }

    fn remove_map(&mut self, iova_start: u64, size: u64) -> Result<RemoveMapResult> {
        if size == 0 {
            bail!("can't unmap 0 sized region");
        }
        let iova_end = iova_start.checked_add(size).context("iova overflow")?;

        // So that invalid requests can be rejected without modifying anything, check
        // for partial overlap before removing the maps.
        let mut to_be_removed = Vec::new();
        for (key, map) in self.maps.range(..iova_end).rev() {
            let map_iova_end = map.iova + map.size;
            if map_iova_end <= iova_start {
                // no overlap
                break;
            }
            if iova_start <= map.iova && map_iova_end <= iova_end {
                to_be_removed.push(*key);
            } else {
                return Ok(RemoveMapResult::OverlapFailure);
            }
        }
        for key in to_be_removed {
            self.maps.remove(&key).expect("map should contain key");
        }
        if let Some(export_state) = self.export_state.as_mut() {
            let removed = AddressRange::from_start_and_size(iova_start, size).unwrap();
            for export in &export_state.exported {
                if export.overlaps(removed) {
                    return Ok(RemoveMapResult::Success(export_state.on_fault()));
                }
            }
        }
        Ok(RemoveMapResult::Success(None))
    }

    fn get_mask(&self) -> Result<u64> {
        Ok(self.mask)
    }

    fn supports_detach(&self) -> bool {
        true
    }

    fn reset_domain(&mut self) -> Option<EventAsync> {
        self.maps.clear();
        if let Some(export_state) = self.export_state.as_mut() {
            if !export_state.exported.is_empty() {
                return export_state.on_fault();
            }
        }
        None
    }

    fn id(&self) -> u32 {
        self.id
    }

    fn start_export_session(&mut self, ex: &Executor) -> Result<Event> {
        if let Some(export_state) = self.export_state.as_ref() {
            if !export_state.exported.is_empty() {
                bail!("previous export session still active");
            }
        }

        let (export_state, fault_event) = ExportState::new(ex)?;
        self.export_state = Some(export_state);
        Ok(fault_event)
    }

    fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
        let export_state = self.export_state.as_mut().context("no export state")?;
        if !export_state.can_export() {
            bail!("broken export state");
        }
        if size == 0 {
            bail!("can't translate 0 sized region");
        }

        // Regions with contiguous iovas and gpas, and identical permissions, are merged.
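        // Walk the mappings backwards from the end of the requested range. Each
        // visited map must end where the previous (higher) one started, tracked by
        // last_iova; any gap means the requested range is not fully mapped and is
        // rejected. A map is merged into the most recently created MemRegion when
        // its gpa run is contiguous with that region and the protections match.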
        let iova_end = iova.checked_add(size).context("iova overflow")?;
        let mut iter = self.maps.range(..iova_end);
        let mut last_iova = iova_end;
        let mut regions: Vec<MemRegion> = Vec::new();
        while let Some((_, map)) = iter.next_back() {
            if last_iova > map.iova + map.size {
                break;
            }
            let mut new_region = true;

            // This is the last region to be inserted / first to be returned when iova >= map.iova
            let region_len = last_iova - std::cmp::max::<u64>(map.iova, iova);
            if let Some(last) = regions.last_mut() {
                if map.gpa.unchecked_add(map.size) == last.gpa && map.prot == last.prot {
                    last.gpa = map.gpa;
                    last.len += region_len;
                    new_region = false;
                }
            }
            if new_region {
                // If this is the only region to be returned, region_len == size (arg of this
                // function)
                // iova_end = iova + size
                // last_iova = iova_end
                // region_len = last_iova - max(map.iova, iova)
                //            = iova + size - iova
                //            = size
                regions.push(MemRegion {
                    gpa: map.gpa,
                    len: region_len,
                    prot: map.prot,
                });
            }
            if iova >= map.iova {
                regions.reverse();
                // The gpa of the first region has to be offset
                regions[0].gpa = map
                    .gpa
                    .checked_add(iova - map.iova)
                    .context("gpa overflow")?;

                export_state
                    .exported
                    .push(AddressRange::from_start_and_end(iova, iova_end - 1));

                return Ok(regions);
            }
            last_iova = map.iova;
        }

        Err(anyhow!("invalid iova {:x} {:x}", iova, size))
    }

    fn release(&mut self, iova: u64, size: u64) -> Result<()> {
        let to_remove = AddressRange::from_start_and_size(iova, size).context("iova overflow")?;
        let state = self.export_state.as_mut().context("no export state")?;

        match state.exported.iter().position(|r| r == &to_remove) {
            Some(idx) => {
                state.exported.swap_remove(idx);
            }
            None => {
                warn!("tried to release unknown range: {:?}", to_remove);
                return Ok(());
            }
        }

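        // A fault is in progress iff the external resolve event has been handed out.
        // If the last exported region was just released, signal virtio-iommu that
        // the fault is resolved.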
        if state.exported.is_empty() && state.fault_resolved_event_external.is_none() {
            state
                .fault_resolved_event_internal
                .signal()
                .expect("failed to resolve fault");
        }

        Ok(())
    }
}

impl AsRawDescriptors for BasicMemoryMapper {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        Vec::new()
    }
}

#[cfg(test)]
mod tests {
    use std::fmt::Debug;

    use super::*;

    fn assert_overlap_failure(val: RemoveMapResult) {
        match val {
            RemoveMapResult::OverlapFailure => (),
            _ => unreachable!(),
        }
    }

    #[test]
    fn test_mapping_info() {
        // Overflow
        MappingInfo::new(u64::MAX - 1, GuestAddress(1), 2, Protection::read()).unwrap_err();
        MappingInfo::new(1, GuestAddress(u64::MAX - 1), 2, Protection::read()).unwrap_err();
        MappingInfo::new(u64::MAX, GuestAddress(1), 2, Protection::read()).unwrap_err();
        MappingInfo::new(1, GuestAddress(u64::MAX), 2, Protection::read()).unwrap_err();
        MappingInfo::new(5, GuestAddress(5), u64::MAX, Protection::read()).unwrap_err();
        // size = 0
        MappingInfo::new(1, GuestAddress(5), 0, Protection::read()).unwrap_err();
    }

    #[test]
    fn test_map_overlap() {
        let mut mapper = BasicMemoryMapper::new(u64::MAX);
        mapper
            .add_map(
                MappingInfo::new(10, GuestAddress(1000), 10, Protection::read_write()).unwrap(),
            )
            .unwrap();
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(14, GuestAddress(1000), 1, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 12, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(16, GuestAddress(1000), 6, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
        assert_eq!(
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(1000), 20, Protection::read_write()).unwrap()
                )
                .unwrap(),
            AddMapResult::OverlapFailure
        );
    }

    #[test]
    // This test is taken from the virtio-iommu spec, with export() calls added
    fn test_map_unmap() {
        let ex = Executor::new().expect("Failed to create an executor");
        // #1
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            mapper.remove_map(0, 4).unwrap();
        }
        // #2
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 9, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(8, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1008),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.export(9, 1).unwrap_err();
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
        }
        // #3
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(6, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(51),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(6, 1).unwrap_err();
        }
        // #4
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 9, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_overlap_failure(mapper.remove_map(0, 4).unwrap());
            assert_eq!(
                mapper.export(5, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1005),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
        }
        // #5
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(5, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(50),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 4).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(4, 1).unwrap_err();
            mapper.export(5, 1).unwrap_err();
        }
        // #6
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(
                    MappingInfo::new(0, GuestAddress(1000), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.export(9, 1).unwrap_err();
            mapper.remove_map(0, 9).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(9, 1).unwrap_err();
        }
        // #7
        {
            let mut mapper = BasicMemoryMapper::new(u64::MAX);
            let _ = mapper.start_export_session(&ex);
            mapper
                .add_map(MappingInfo::new(0, GuestAddress(1000), 4, Protection::read()).unwrap())
                .unwrap();
            mapper
                .add_map(
                    MappingInfo::new(10, GuestAddress(50), 4, Protection::read_write()).unwrap(),
                )
                .unwrap();
            assert_eq!(
                mapper.export(0, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read()
                }
            );
            assert_eq!(
                mapper.export(3, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(1003),
                    len: 1,
                    prot: Protection::read()
                }
            );
            mapper.export(4, 1).unwrap_err();
            assert_eq!(
                mapper.export(10, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(50),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            assert_eq!(
                mapper.export(13, 1).unwrap()[0],
                MemRegion {
                    gpa: GuestAddress(53),
                    len: 1,
                    prot: Protection::read_write()
                }
            );
            mapper.remove_map(0, 14).unwrap();
            mapper.export(0, 1).unwrap_err();
            mapper.export(3, 1).unwrap_err();
            mapper.export(4, 1).unwrap_err();
            mapper.export(10, 1).unwrap_err();
            mapper.export(13, 1).unwrap_err();
        }
    }

    #[test]
    fn test_remove_map() {
        let mut mapper = BasicMemoryMapper::new(u64::MAX);
        mapper
            .add_map(MappingInfo::new(1, GuestAddress(1000), 4, Protection::read()).unwrap())
            .unwrap();
        mapper
            .add_map(MappingInfo::new(5, GuestAddress(50), 4, Protection::read_write()).unwrap())
            .unwrap();
        mapper
            .add_map(MappingInfo::new(9, GuestAddress(50), 4, Protection::read_write()).unwrap())
            .unwrap();
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(0, 6).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(1, 5).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(1, 9).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(6, 4).unwrap());
        assert_eq!(mapper.len(), 3);
        assert_overlap_failure(mapper.remove_map(6, 14).unwrap());
        assert_eq!(mapper.len(), 3);
        mapper.remove_map(5, 4).unwrap();
        assert_eq!(mapper.len(), 2);
        assert_overlap_failure(mapper.remove_map(1, 9).unwrap());
        assert_eq!(mapper.len(), 2);
        mapper.remove_map(0, 15).unwrap();
        assert_eq!(mapper.len(), 0);
    }

    fn assert_vec_eq<T: std::cmp::PartialEq + Debug>(a: Vec<T>, b: Vec<T>) {
        assert_eq!(a.len(), b.len());
        for (x, y) in a.into_iter().zip(b.into_iter()) {
            assert_eq!(x, y);
        }
    }

    #[test]
    fn test_translate_len() {
        let mut mapper = BasicMemoryMapper::new(u64::MAX);
        let ex = Executor::new().expect("Failed to create an executor");
        let _ = mapper.start_export_session(&ex);
        // [1, 5) -> [1000, 1004)
        mapper
            .add_map(MappingInfo::new(1, GuestAddress(1000), 4, Protection::read()).unwrap())
            .unwrap();
        mapper.export(1, 0).unwrap_err();
        assert_eq!(
            mapper.export(1, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 1,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 2).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 2,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 3).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 3,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 1,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 2).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 2,
                prot: Protection::read()
            }
        );
        mapper.export(1, 5).unwrap_err();
        // [1, 9) -> [1000, 1008)
        mapper
            .add_map(MappingInfo::new(5, GuestAddress(1004), 4, Protection::read()).unwrap())
            .unwrap();
        // Spanned across 2 maps
        assert_eq!(
            mapper.export(2, 5).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 5,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 6).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 6,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(2, 7).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1001),
                len: 7,
                prot: Protection::read()
            }
        );
        mapper.export(2, 8).unwrap_err();
        mapper.export(3, 10).unwrap_err();
        // [1, 9) -> [1000, 1008), [11, 17) -> [1010, 1016)
        mapper
            .add_map(MappingInfo::new(11, GuestAddress(1010), 6, Protection::read()).unwrap())
            .unwrap();
        // Discontiguous iova
        mapper.export(3, 10).unwrap_err();
        // [1, 17) -> [1000, 1016)
        mapper
            .add_map(MappingInfo::new(9, GuestAddress(1008), 2, Protection::read()).unwrap())
            .unwrap();
        // Spanned across 4 maps
        assert_eq!(
            mapper.export(3, 10).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1002),
                len: 10,
                prot: Protection::read()
            }
        );
        assert_eq!(
            mapper.export(1, 16).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(1000),
                len: 16,
                prot: Protection::read()
            }
        );
        mapper.export(1, 17).unwrap_err();
        mapper.export(0, 16).unwrap_err();
        // [0, 1) -> [5, 6), [1, 17) -> [1000, 1016)
        mapper
            .add_map(MappingInfo::new(0, GuestAddress(5), 1, Protection::read()).unwrap())
            .unwrap();
        assert_eq!(
            mapper.export(0, 1).unwrap()[0],
            MemRegion {
                gpa: GuestAddress(5),
                len: 1,
                prot: Protection::read()
            }
        );
        // Discontiguous gpa
        assert_vec_eq(
            mapper.export(0, 2).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(5),
                    len: 1,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 1,
                    prot: Protection::read(),
                },
            ],
        );
        assert_vec_eq(
            mapper.export(0, 16).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(5),
                    len: 1,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 15,
                    prot: Protection::read(),
                },
            ],
        );
        // [0, 1) -> [5, 6), [1, 17) -> [1000, 1016), [17, 19) -> [1016, 1018) <RW>
        mapper
            .add_map(MappingInfo::new(17, GuestAddress(1016), 2, Protection::read_write()).unwrap())
            .unwrap();
        // Contiguous iova and gpa, but different perm
        assert_vec_eq(
            mapper.export(1, 17).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1000),
                    len: 16,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 1,
                    prot: Protection::read_write(),
                },
            ],
        );
        // Contiguous iova and gpa, but different perm
        assert_vec_eq(
            mapper.export(2, 16).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1001),
                    len: 15,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 1,
                    prot: Protection::read_write(),
                },
            ],
        );
        assert_vec_eq(
            mapper.export(2, 17).unwrap(),
            vec![
                MemRegion {
                    gpa: GuestAddress(1001),
                    len: 15,
                    prot: Protection::read(),
                },
                MemRegion {
                    gpa: GuestAddress(1016),
                    len: 2,
                    prot: Protection::read_write(),
                },
            ],
        );
        mapper.export(2, 500).unwrap_err();
        mapper.export(500, 5).unwrap_err();
    }
}