// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ffi::c_void;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::sync::Arc;

use base::errno_result;
use base::error;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::MappedRegion;
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use fnv::FnvHashMap;
use libc::E2BIG;
use libc::EEXIST;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::ENOTSUP;
use libc::EOVERFLOW;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[cfg(windows)]
use win_util::win32_wide_string;

use super::*;
use crate::host_phys_addr_bits;
use crate::ClockState;
use crate::Datamatch;
use crate::DeviceKind;
use crate::Hypervisor;
use crate::IoEventAddress;
use crate::MemCacheType;
use crate::MemSlot;
use crate::VcpuX86_64;
use crate::Vm;
use crate::VmCap;
use crate::VmX86_64;

/// A wrapper around creating and using a HAXM VM.
pub struct HaxmVm {
    haxm: Haxm,
    vm_id: u32,
    descriptor: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (GuestAddress, Box<dyn MappedRegion>)>>>,
    /// A min-heap of MemSlot numbers that were used and then removed and can now be reused
    /// (see the sketch below the struct).
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    // HAXM's implementation of ioevents makes several assumptions about how crosvm uses ioevents:
    //   1. All ioevents are registered during device setup, and thus can be cloned when the vm is
    //      cloned instead of locked in an Arc<Mutex<>>. This will make handling ioevents in each
    //      vcpu thread easier because no locks will need to be acquired.
    //   2. All ioevents use Datamatch::AnyLength. We don't bother checking the datamatch, which
    //      will make this faster.
    //   3. We only ever register one eventfd to each address. This simplifies our data structure.
    ioevents: FnvHashMap<IoEventAddress, Event>,
}
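
// Illustrative sketch (not part of the crosvm code): `BinaryHeap` is a
// max-heap, so `mem_slot_gaps` wraps slot numbers in `Reverse` to get
// min-heap behavior, i.e. the lowest freed slot number is popped first:
//
//     let mut gaps: BinaryHeap<Reverse<MemSlot>> = BinaryHeap::new();
//     gaps.push(Reverse(5));
//     gaps.push(Reverse(2));
//     assert_eq!(gaps.pop(), Some(Reverse(2))); // smallest gap is reused first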

impl HaxmVm {
    /// Constructs a new `HaxmVm` using the given `Haxm` instance.
    pub fn new(haxm: &Haxm, guest_mem: GuestMemory) -> Result<HaxmVm> {
        let mut vm_id: u32 = 0;
        // SAFETY:
        // Safe because we know descriptor is a real haxm descriptor as this module is the only
        // one that can make Haxm objects.
        let ret = unsafe { ioctl_with_mut_ref(haxm, HAX_IOCTL_CREATE_VM, &mut vm_id) };
        if ret != 0 {
            return errno_result();
        }

        // Haxm creates additional device paths when VMs are created.
        let vm_descriptor = open_haxm_vm_device(USE_GHAXM.load(Ordering::Relaxed), vm_id)?;

        for region in guest_mem.regions() {
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    false,
                    region.guest_addr.offset(),
                    region.size as u64,
                    MemoryRegionOp::Add(region.host_addr as *mut u8 as u64),
                )
            }?;
        }

        Ok(HaxmVm {
            vm_id,
            haxm: haxm.try_clone()?,
            descriptor: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            ioevents: FnvHashMap::default(),
        })
    }

    pub fn check_raw_capability(&self, cap: u32) -> bool {
        let mut capability_info = hax_capabilityinfo::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            unsafe { ioctl_with_mut_ref(&self.haxm, HAX_IOCTL_CAPABILITY, &mut capability_info) };

        if ret != 0 {
            return false;
        }

        // If wstatus is zero, HAXM is not usable.
        // In this case, the winfo bits indicate why, rather than communicating capability
        // information.
        if capability_info.wstatus == 0 {
            return false;
        }

        (cap & capability_info.winfo as u32) != 0
    }

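    // Sketch of intended usage (an assumption, mirroring the tests below):
    // callers should gate this call on `check_raw_capability(HAX_CAP_VM_LOG)`,
    // since the ioctl only exists on the internal fork:
    //
    //     if vm.check_raw_capability(HAX_CAP_VM_LOG) {
    //         vm.register_log_file(path)?;
    //     }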
    pub fn register_log_file(&self, path: &str) -> Result<()> {
        // The IOCTL here is only available on the internal fork of HAXM and only works on
        // Windows.
        #[cfg(windows)]
        if get_use_ghaxm() {
            let mut log_file = hax_log_file::default();

            // Although it would be more efficient to do this check prior to allocating the
            // log_file struct, the code would be more complex and less maintainable. This is only
            // ever called once per VM, so the extra temporary memory and time shouldn't be a
            // problem.
            if path.len() >= log_file.path.len() {
                return Err(Error::new(E2BIG));
            }

            let wstring = &win32_wide_string(path);
            log_file.path[..wstring.len()].clone_from_slice(wstring);

            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            let ret = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_REGISTER_LOG_FILE, &log_file) };

            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}

impl AsRawDescriptor for HaxmVm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}

enum MemoryRegionOp {
    // Map a memory region for the given host address.
    Add(u64),
    // Remove the memory region.
    Remove,
}

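// Safety: for `MemoryRegionOp::Add`, the caller must ensure the host mapping
// at `va..va + size` remains valid for as long as the region is registered
// with the VM, and that registered guest ranges do not overlap (see the
// SAFETY comments at the call sites).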
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    read_only: bool,
    guest_addr: u64,
    size: u64,
    op: MemoryRegionOp,
) -> Result<()> {
    let (va, flags) = match op {
        MemoryRegionOp::Add(va) => {
            let mut flags = HAX_RAM_INFO_STANDALONE;
            if read_only {
                flags |= HAX_RAM_INFO_ROM
            }
            (va, flags)
        }
        MemoryRegionOp::Remove => (0, HAX_RAM_INFO_INVALID),
    };
    let ram_info = hax_set_ram_info2 {
        pa_start: guest_addr,
        size,
        va,
        flags,
        ..Default::default()
    };

    // SAFETY:
    // Safe because we know that our file is a VM fd and we verify the return result.
    let ret = ioctl_with_ref(descriptor, HAX_VM_IOCTL_SET_RAM2, &ram_info);
    if ret != 0 {
        return errno_result();
    }
    Ok(())
}
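
// Illustrative call sequence (a sketch, not code that runs here): map a host
// buffer into the guest at `guest_pa`, then remove the mapping again.
//
//     unsafe {
//         set_user_memory_region(&vm.descriptor, /* read_only= */ false,
//             guest_pa, size, MemoryRegionOp::Add(host_va))?;
//         set_user_memory_region(&vm.descriptor, false,
//             guest_pa, size, MemoryRegionOp::Remove)?;
//     }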

impl Vm for HaxmVm {
    fn try_clone(&self) -> Result<Self> {
        let mut ioevents = FnvHashMap::default();
        for (addr, evt) in self.ioevents.iter() {
            ioevents.insert(*addr, evt.try_clone()?);
        }
        Ok(HaxmVm {
            vm_id: self.vm_id,
            haxm: self.haxm.try_clone()?,
            descriptor: self.descriptor.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            ioevents,
        })
    }

    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            VmCap::PvClock => false,
            VmCap::Protected => false,
            VmCap::EarlyInitCpuid => false,
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let size = mem.size() as u64;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
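        // Reuse the lowest slot number freed by a prior removal if one exists;
        // otherwise allocate a fresh slot past every slot in use (the initial
        // guest memory regions implicitly occupy the first slots).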
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY:
        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let res = unsafe {
            set_user_memory_region(
                &self.descriptor,
                read_only,
                guest_addr.offset(),
                size,
                MemoryRegionOp::Add(mem.as_ptr() as u64),
            )
        };

        if let Err(e) = res {
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (guest_addr, mem));
        Ok(slot)
    }

    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, mem) = regions.get_mut(&slot).ok_or(Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        let mut regions = self.mem_regions.lock();

        if let Some((guest_addr, mem)) = regions.get(&slot) {
            // SAFETY:
            // Safe because the slot is checked against the list of memory slots.
            unsafe {
                set_user_memory_region(
                    &self.descriptor,
                    false,
                    guest_addr.offset(),
                    mem.size() as u64,
                    MemoryRegionOp::Remove,
                )?;
            }
            self.mem_slot_gaps.lock().push(Reverse(slot));
            Ok(regions.remove(&slot).unwrap().1)
        } else {
            Err(Error::new(ENOENT))
        }
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        // Haxm does not support in-kernel devices.
        Err(Error::new(libc::ENXIO))
    }

    fn get_dirty_log(&self, _slot: u32, _dirty_log: &mut [u8]) -> Result<()> {
        // Haxm does not support VmCap::DirtyLog.
        Err(Error::new(libc::ENXIO))
    }

    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM currently only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        if self.ioevents.contains_key(&addr) {
            error!("HAXM does not support multiple ioevents for the same address");
            return Err(Error::new(EEXIST));
        }

        self.ioevents.insert(addr, evt.try_clone()?);

        Ok(())
    }

    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        match self.ioevents.get(&addr) {
            Some(existing_evt) => {
                // evt should match the existing evt associated with addr.
                if evt != existing_evt {
                    return Err(Error::new(ENOENT));
                }
                self.ioevents.remove(&addr);
            }

            None => {
                return Err(Error::new(ENOENT));
            }
        };
        Ok(())
    }

    /// Trigger any io events based on the memory-mapped IO at `addr`. If the hypervisor does
    /// in-kernel IO event delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        if let Some(evt) = self.ioevents.get(&addr) {
            evt.signal()?;
        }
        Ok(())
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        // Haxm does not support VmCap::PvClock.
        Err(Error::new(libc::ENXIO))
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        // Haxm does not support VmCap::PvClock.
        Err(Error::new(libc::ENXIO))
    }

    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: crate::BalloonEvent) -> Result<()> {
        // TODO(b/233773610): implement ballooning support in haxm
        warn!("Memory ballooning attempted but not supported on haxm hypervisor");
        // no-op
        Ok(())
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host's.
        host_phys_addr_bits()
    }
}

impl VmX86_64 for HaxmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.haxm
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_VCPU_CREATE, &(id as u32)) };
        if fd < 0 {
            return errno_result();
        }

        let descriptor =
            open_haxm_vcpu_device(USE_GHAXM.load(Ordering::Relaxed), self.vm_id, id as u32)?;

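        // The "tunnel" is a shared mapping between the HAXM kernel module and
        // userspace; the vcpu's exit information and I/O data are exchanged
        // through the `va` and `io_va` pointers it returns.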
        let mut tunnel_info = hax_tunnel_info::default();

        // SAFETY:
        // Safe because we created tunnel_info and we check the return code for errors.
        let ret = unsafe {
            ioctl_with_mut_ref(&descriptor, HAX_VCPU_IOCTL_SETUP_TUNNEL, &mut tunnel_info)
        };

        if ret != 0 {
            return errno_result();
        }

        Ok(Box::new(HaxmVcpu {
            descriptor,
            id,
            tunnel: tunnel_info.va as *mut hax_tunnel,
            io_buffer: tunnel_info.io_va as *mut c_void,
        }))
    }

    /// Sets the address of the three-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support for HAXM.
    fn set_tss_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    /// Sets the address of a one-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support for HAXM.
    fn set_identity_map_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }
}

// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use std::time::Duration;

    use base::EventWaitResult;
    use base::MemoryMappingBuilder;
    use base::SharedMemory;

    use super::*;

    #[test]
    fn create_vm() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        HaxmVm::new(&haxm, mem).expect("failed to create vm");
    }

    #[test]
    fn create_vcpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        vm.create_vcpu(0).expect("failed to create vcpu");
    }

    #[test]
    fn register_ioevent() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let otherevt = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::AnyLength,
        )
        .expect_err("HAXM should not allow you to register two events for the same address");

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U8(None),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U32(Some(0xf6)),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.unregister_ioevent(&otherevt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown event should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf5), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown MMIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
    }

    #[test]
    fn handle_io_events() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let evt2 = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt2, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        // Check a PIO address.
        vm.handle_io_events(IoEventAddress::Pio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_ne!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        // Check an MMIO address.
        vm.handle_io_events(IoEventAddress::Mmio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_ne!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );

        // Check an address that does not match any registered ioevents.
        vm.handle_io_events(IoEventAddress::Pio(0x1001), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
    }

    #[test]
    fn remove_memory() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).unwrap();
        let mem_size = 0x1000;
        let shm = SharedMemory::new("test", mem_size as u64).unwrap();
        let mem = MemoryMappingBuilder::new(mem_size)
            .from_shared_memory(&shm)
            .build()
            .unwrap();
        let mem_ptr = mem.as_ptr();
        let slot = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(mem),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        let removed_mem = vm.remove_memory_region(slot).unwrap();
        assert_eq!(removed_mem.size(), mem_size);
        assert_eq!(removed_mem.as_ptr(), mem_ptr);
    }

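    // A sketch of an additional test (an assumption derived from the slot
    // logic in `add_memory_region`/`remove_memory_region`, not an existing
    // crosvm test): a freed slot should be handed out again by the next add.
    #[test]
    fn reuse_memory_slot() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).unwrap();
        let mem_size = 0x1000;
        // Helper to build a fresh shared-memory-backed mapping for each add.
        let make_mem = || {
            let shm = SharedMemory::new("test", mem_size as u64).unwrap();
            MemoryMappingBuilder::new(mem_size)
                .from_shared_memory(&shm)
                .build()
                .unwrap()
        };
        let slot = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(make_mem()),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        vm.remove_memory_region(slot).unwrap();
        // The min-heap of gaps should yield the slot we just freed.
        let slot2 = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(make_mem()),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        assert_eq!(slot, slot2);
    }
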
    #[cfg(windows)]
    #[test]
    fn register_log_file() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let vm = HaxmVm::new(&haxm, gm).unwrap();

        if !vm.check_raw_capability(HAX_CAP_VM_LOG) {
            return;
        }

        let dir = tempfile::TempDir::new().unwrap();
        let mut file_path = dir.path().to_owned();
        file_path.push("test");

        vm.register_log_file(file_path.to_str().unwrap())
            .expect("failed to register log file");

        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        // Setting cpuid will force some logs.
        let cpuid = haxm.get_supported_cpuid().unwrap();
        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");

        assert!(file_path.exists());
    }
}