// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(feature = "gpu")]
pub(crate) mod gpu;

use std::path::Path;
use std::time::Duration;

use base::error;
use base::AsRawDescriptor;
use base::Descriptor;
use base::Error as SysError;
use base::MemoryMappingArena;
use base::MmapError;
use base::Protection;
use base::SafeDescriptor;
use base::Tube;
use base::UnixSeqpacket;
use hypervisor::MemCacheType;
use hypervisor::MemSlot;
use hypervisor::Vm;
use libc::EINVAL;
use libc::ERANGE;
use once_cell::sync::Lazy;
use resources::Alloc;
use resources::SystemAllocator;
use serde::Deserialize;
use serde::Serialize;
use vm_memory::GuestAddress;

use crate::client::HandleRequestResult;
use crate::VmMappedMemoryRegion;
use crate::VmRequest;
use crate::VmResponse;

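/// Connects to the crosvm control socket at `socket_path`, sends `request`, and waits
/// indefinitely for the `VmResponse`. Failures are logged and collapsed into `Err(())`.
/// See [`handle_request_with_timeout`] below for a usage sketch with a receive timeout.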
pub fn handle_request<T: AsRef<Path> + std::fmt::Debug>(
    request: &VmRequest,
    socket_path: T,
) -> HandleRequestResult {
    handle_request_with_timeout(request, socket_path, None)
}

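/// Like [`handle_request`], but with an optional receive timeout applied to the reply.
///
/// A minimal usage sketch (not part of the original source); the socket path and the
/// `VmRequest::Exit` variant are illustrative assumptions:
///
/// ```ignore
/// use std::time::Duration;
///
/// // Hypothetical path passed to crosvm's `--socket` option.
/// let socket_path = "/run/crosvm.sock";
/// match handle_request_with_timeout(
///     &VmRequest::Exit,
///     socket_path,
///     Some(Duration::from_secs(5)),
/// ) {
///     Ok(response) => println!("VM responded: {:?}", response),
///     Err(()) => eprintln!("request failed; details were logged"),
/// }
/// ```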
pub fn handle_request_with_timeout<T: AsRef<Path> + std::fmt::Debug>(
    request: &VmRequest,
    socket_path: T,
    timeout: Option<Duration>,
) -> HandleRequestResult {
    match UnixSeqpacket::connect(&socket_path) {
        Ok(s) => {
            let socket = Tube::new_from_unix_seqpacket(s).map_err(|_| ())?;
            if timeout.is_some() {
                if let Err(e) = socket.set_recv_timeout(timeout) {
                    error!(
                        "failed to set recv timeout on socket at '{:?}': {}",
                        socket_path, e
                    );
                    return Err(());
                }
            }
            if let Err(e) = socket.send(request) {
                error!(
                    "failed to send request to socket at '{:?}': {}",
                    socket_path, e
                );
                return Err(());
            }
            match socket.recv() {
                Ok(response) => Ok(response),
                Err(e) => {
                    error!(
                        "failed to recv response from socket at '{:?}': {}",
                        socket_path, e
                    );
                    Err(())
                }
            }
        }
        Err(e) => {
            error!("failed to connect to socket at '{:?}': {}", socket_path, e);
            Err(())
        }
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingRequest {
    /// Flush the content of a memory mapping to its backing file.
    /// `slot` selects the arena (as returned by `Vm::add_mmap_arena`).
    /// `offset` is the offset of the mapping to sync within the arena.
    /// `size` is the size of the mapping to sync within the arena.
    MsyncArena {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Apply `MADV_PAGEOUT` advice to the memory region mapped at `slot`, over the address
    /// range starting at `offset` from the start of the region and spanning `size` bytes.
    MadvisePageout {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },

    /// Apply `MADV_REMOVE` advice to the memory region mapped at `slot`, over the address
    /// range starting at `offset` from the start of the region and spanning `size` bytes.
    MadviseRemove {
        slot: MemSlot,
        offset: usize,
        size: usize,
    },
}

#[derive(Serialize, Deserialize, Debug)]
pub enum VmMemoryMappingResponse {
    Ok,
    Err(SysError),
}

impl VmMemoryMappingRequest {
    /// Executes this request on the given `Vm`.
    ///
    /// # Arguments
    /// * `vm` - The `Vm` to perform the request on.
    ///
    /// This does not return a `Result`; instead, success or failure is encapsulated in a
    /// `VmMemoryMappingResponse`, which is intended to be sent back over the socket that
    /// received this `VmMemoryMappingRequest`.
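    ///
    /// A hedged usage sketch (not part of the original source), assuming the caller already
    /// holds a mutable `Vm` implementation and a `slot` previously returned by
    /// `Vm::add_mmap_arena`:
    ///
    /// ```ignore
    /// let request = VmMemoryMappingRequest::MsyncArena {
    ///     slot,
    ///     offset: 0,
    ///     size: 4096,
    /// };
    /// match request.execute(&mut vm) {
    ///     VmMemoryMappingResponse::Ok => (),
    ///     VmMemoryMappingResponse::Err(e) => error!("msync failed: {}", e),
    /// }
    /// ```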
    pub fn execute(&self, vm: &mut impl Vm) -> VmMemoryMappingResponse {
        use self::VmMemoryMappingRequest::*;
        match *self {
            MsyncArena { slot, offset, size } => match vm.msync_memory_region(slot, offset, size) {
                Ok(()) => VmMemoryMappingResponse::Ok,
                Err(e) => VmMemoryMappingResponse::Err(e),
            },
            MadvisePageout { slot, offset, size } => {
                match vm.madvise_pageout_memory_region(slot, offset, size) {
                    Ok(()) => VmMemoryMappingResponse::Ok,
                    Err(e) => VmMemoryMappingResponse::Err(e),
                }
            }
            MadviseRemove { slot, offset, size } => {
                match vm.madvise_remove_memory_region(slot, offset, size) {
                    Ok(()) => VmMemoryMappingResponse::Ok,
                    Err(e) => VmMemoryMappingResponse::Err(e),
                }
            }
        }
    }
}

#[derive(Serialize, Deserialize, Debug)]
pub enum FsMappingRequest {
    /// Create an anonymous memory mapping that spans the entire region described by `Alloc`.
    AllocateSharedMemoryRegion(Alloc),
    /// Create a memory mapping.
    CreateMemoryMapping {
        /// The slot for a MemoryMappingArena, previously returned by a response to an
        /// `AllocateSharedMemoryRegion` request.
        slot: u32,
        /// The file descriptor that should be mapped.
        fd: SafeDescriptor,
        /// The size of the mapping.
        size: usize,
        /// The offset into the file from where the mapping should start.
        file_offset: u64,
        /// The memory protection to be used for the mapping.  Protections other than readable and
        /// writable will be silently dropped.
        prot: Protection,
        /// The offset into the shared memory region where the mapping should be placed.
        mem_offset: usize,
    },
    /// Remove a memory mapping.
    RemoveMemoryMapping {
        /// The slot for a MemoryMappingArena.
        slot: u32,
        /// The offset into the shared memory region.
        offset: usize,
        /// The size of the mapping.
        size: usize,
    },
}

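/// Allocates a shared memory region backed by an anonymous `MemoryMappingArena` covering the
/// PCI BAR described by `alloc`, registers it with the hypervisor, and returns the resulting
/// guest address and memory slot. Fails with `EINVAL` if `alloc` is not a `PciBar` allocation.
///
/// A hedged usage sketch (not part of the original source); the `vm`, `allocator`, and `alloc`
/// values are assumed to come from the caller's device setup:
///
/// ```ignore
/// let region = prepare_shared_memory_region(
///     &mut vm,
///     &mut allocator,
///     alloc, // an Alloc::PciBar previously registered with the allocator
///     MemCacheType::CacheCoherent,
/// )?;
/// println!("mapped at {:?} in slot {}", region.guest_address, region.slot);
/// ```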
pub fn prepare_shared_memory_region(
    vm: &mut dyn Vm,
    allocator: &mut SystemAllocator,
    alloc: Alloc,
    cache: MemCacheType,
) -> Result<VmMappedMemoryRegion, SysError> {
    if !matches!(alloc, Alloc::PciBar { .. }) {
        return Err(SysError::new(EINVAL));
    }
    match allocator.mmio_allocator_any().get(&alloc) {
        Some((range, _)) => {
            let size: usize = match range.len().and_then(|x| x.try_into().ok()) {
                Some(v) => v,
                None => return Err(SysError::new(ERANGE)),
            };
            let arena = match MemoryMappingArena::new(size) {
                Ok(a) => a,
                Err(MmapError::SystemCallFailed(e)) => return Err(e),
                _ => return Err(SysError::new(EINVAL)),
            };

            match vm.add_memory_region(
                GuestAddress(range.start),
                Box::new(arena),
                false,
                false,
                cache,
            ) {
                Ok(slot) => Ok(VmMappedMemoryRegion {
                    guest_address: GuestAddress(range.start),
                    slot,
                }),
                Err(e) => Err(e),
            }
        }
        None => Err(SysError::new(EINVAL)),
    }
}

static SHOULD_PREPARE_MEMORY_REGION: Lazy<bool> = Lazy::new(|| {
    if cfg!(target_arch = "x86_64") {
        // The legacy x86 MMU allocates an rmap and a page tracking array
        // that take 2.5MiB per 1GiB of user memory region address space,
        // so avoid mapping the whole shared memory region if we're not
        // using the tdp mmu.
        match std::fs::read("/sys/module/kvm/parameters/tdp_mmu") {
            Ok(bytes) if !bytes.is_empty() => bytes[0] == b'Y',
            _ => false,
        }
    } else if cfg!(target_pointer_width = "64") {
        true
    } else {
        // Not enough address space on 32-bit systems
        false
    }
});

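/// Returns whether the whole shared memory region should be mapped up front (see the
/// heuristics in `SHOULD_PREPARE_MEMORY_REGION` above): true on 64-bit non-x86 targets, and on
/// x86_64 only when KVM's TDP MMU is enabled.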
pub fn should_prepare_memory_region() -> bool {
    *SHOULD_PREPARE_MEMORY_REGION
}

impl FsMappingRequest {
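    /// Executes this request against the given `Vm`, allocating from `allocator` when a new
    /// shared memory region is requested, and returns the outcome as a `VmResponse`.
    ///
    /// A hedged usage sketch (not part of the original source), assuming a `slot` obtained
    /// from an earlier `AllocateSharedMemoryRegion` response and an open file `fd`:
    ///
    /// ```ignore
    /// let request = FsMappingRequest::CreateMemoryMapping {
    ///     slot,
    ///     fd,
    ///     size: 4096,
    ///     file_offset: 0,
    ///     prot: Protection::read_write(),
    ///     mem_offset: 0,
    /// };
    /// match request.execute(&mut vm, &mut allocator) {
    ///     VmResponse::Ok => (),
    ///     resp => error!("mapping failed: {:?}", resp),
    /// }
    /// ```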
    pub fn execute(&self, vm: &mut dyn Vm, allocator: &mut SystemAllocator) -> VmResponse {
        use self::FsMappingRequest::*;
        match *self {
            AllocateSharedMemoryRegion(alloc) => {
                match prepare_shared_memory_region(
                    vm,
                    allocator,
                    alloc,
                    MemCacheType::CacheCoherent,
                ) {
                    Ok(VmMappedMemoryRegion { slot, .. }) => VmResponse::RegisterMemory { slot },
                    Err(e) => VmResponse::Err(e),
                }
            }
            CreateMemoryMapping {
                slot,
                ref fd,
                size,
                file_offset,
                prot,
                mem_offset,
            } => {
                let raw_fd: Descriptor = Descriptor(fd.as_raw_descriptor());

                match vm.add_fd_mapping(slot, mem_offset, size, &raw_fd, file_offset, prot) {
                    Ok(()) => VmResponse::Ok,
                    Err(e) => VmResponse::Err(e),
                }
            }
            RemoveMemoryMapping { slot, offset, size } => {
                match vm.remove_mapping(slot, offset, size) {
                    Ok(()) => VmResponse::Ok,
                    Err(e) => VmResponse::Err(e),
                }
            }
        }
    }
}