// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Wraps VfioContainer for virtio-iommu implementation

use std::sync::Arc;

use anyhow::Context;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Protection;
use base::RawDescriptor;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::vfio::VfioError;
use crate::virtio::iommu::memory_mapper::AddMapResult;
use crate::virtio::iommu::memory_mapper::MappingInfo;
use crate::virtio::iommu::memory_mapper::MemoryMapper;
use crate::virtio::iommu::memory_mapper::RemoveMapResult;
use crate::VfioContainer;

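/// Adapter that implements [`MemoryMapper`] on top of a [`VfioContainer`] so
/// the virtio-iommu device can program VFIO DMA mappings.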
pub struct VfioWrapper {
    container: Arc<Mutex<VfioContainer>>,
    // ID of the VFIO group which constitutes the container. Note that we rely on
    // the fact that no container contains multiple groups.
    id: u32,
    mem: GuestMemory,
}

impl VfioWrapper {
    pub fn new(container: Arc<Mutex<VfioContainer>>, mem: GuestMemory) -> Self {
        let c = container.lock();
        let groups = c.group_ids();
        // NOTE: vfio_get_container ensures each group gets its own container.
        assert!(groups.len() == 1);
        let id = *groups[0];
        drop(c);
        Self { container, id, mem }
    }
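
    // Illustrative construction (a sketch; `container` and `guest_mem` are
    // assumed to come from the caller's VFIO and guest-memory setup):
    //
    //     let wrapper = VfioWrapper::new(container.clone(), guest_mem.clone());
    //     let group_id = wrapper.id();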

    pub fn new_with_id(container: VfioContainer, id: u32, mem: GuestMemory) -> Self {
        Self {
            container: Arc::new(Mutex::new(container)),
            id,
            mem,
        }
    }

    pub fn clone_as_raw_descriptor(&self) -> Result<RawDescriptor, VfioError> {
        self.container.lock().clone_as_raw_descriptor()
    }

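    /// # Safety
    ///
    /// `map.gpa` must already contain a host virtual address that is valid
    /// for `map.size` bytes, since it is handed to VFIO as the userspace
    /// address to map.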
    unsafe fn do_map(&self, map: MappingInfo) -> anyhow::Result<AddMapResult> {
        let res = self.container.lock().vfio_dma_map(
            map.iova,
            map.size,
            map.gpa.offset(),
            map.prot.allows(&Protection::write()),
        );
        if let Err(VfioError::IommuDmaMap(err)) = res {
            if err.errno() == libc::EEXIST {
                // A mapping already exists in the requested range.
                return Ok(AddMapResult::OverlapFailure);
            }
        }
        res.context("vfio mapping error").map(|_| AddMapResult::Ok)
    }
}

impl MemoryMapper for VfioWrapper {
    fn add_map(&mut self, mut map: MappingInfo) -> anyhow::Result<AddMapResult> {
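        // Rewrite the guest-physical address into the corresponding host
        // virtual address; VFIO maps host (userspace) addresses, not GPAs.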
        map.gpa = GuestAddress(
            self.mem
                .get_host_address_range(map.gpa, map.size as usize)
                .context("failed to find host address")? as u64,
        );

        // SAFETY:
        // Safe because both guest and host address are guaranteed by
        // get_host_address_range() to be valid.
        unsafe { self.do_map(map) }
    }

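    // Caller contract for this unsafe method: `hva` must be a host virtual
    // address that remains valid for `size` bytes while the mapping exists.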
    unsafe fn vfio_dma_map(
        &mut self,
        iova: u64,
        hva: u64,
        size: u64,
        prot: Protection,
    ) -> anyhow::Result<AddMapResult> {
        self.do_map(MappingInfo {
            iova,
            gpa: GuestAddress(hva),
            size,
            prot,
        })
    }

    fn remove_map(&mut self, iova_start: u64, size: u64) -> anyhow::Result<RemoveMapResult> {
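        // Reject ranges whose end would overflow the 64-bit IOVA space.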
        iova_start.checked_add(size).context("iova overflow")?;
        self.container
            .lock()
            .vfio_dma_unmap(iova_start, size)
            .context("vfio unmapping error")
            .map(|_| RemoveMapResult::Success(None))
    }

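    // The mask is VFIO's supported IOVA page-size bitmap: bit `k` set means
    // a page size of `1 << k` bytes is supported.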
    fn get_mask(&self) -> anyhow::Result<u64> {
        self.container
            .lock()
            .vfio_get_iommu_page_size_mask()
            .context("vfio get mask error")
    }

    fn supports_detach(&self) -> bool {
        // A few reasons why we don't support detach:
        //
        // 1. It does not seem possible to dynamically attach and detach an IOMMU domain if
        //    the virtio-iommu device is running on top of VFIO.
        // 2. Even if VIRTIO_IOMMU_T_DETACH is implemented in the front-end driver, it could
        //    violate the following requirement of the virtio-iommu spec: "Detach an endpoint
        //    from a domain. When this request completes, the endpoint cannot access any
        //    mapping from that domain anymore."
        //
        //    This is because VFIO doesn't support detaching a single device. When the
        //    virtio-iommu device receives a VIRTIO_IOMMU_T_DETACH request, it can either:
        //    - detach the whole group: any other endpoints in the group lose access to the
        //      domain, or
        //    - not detach the group at all: this breaks the spec requirement quoted above.
        false
    }

    fn id(&self) -> u32 {
        self.id
    }
}
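
// Illustrative use (a sketch, not code from this crate): the virtio-iommu
// device drives a `VfioWrapper` through the `MemoryMapper` trait roughly
// like this, where `iova`, `gpa`, `size`, and `prot` are assumed to come
// from a guest VIRTIO_IOMMU_T_MAP request:
//
//     let mut mapper: Box<dyn MemoryMapper> = Box::new(wrapper);
//     match mapper.add_map(MappingInfo { iova, gpa, size, prot })? {
//         AddMapResult::Ok => { /* mapping established */ }
//         AddMapResult::OverlapFailure => { /* report the overlap to the guest */ }
//     }
//     mapper.remove_map(iova, size)?;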

impl AsRawDescriptors for VfioWrapper {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        vec![self.container.lock().as_raw_descriptor()]
    }
}