// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Resource management and resolution for the virtio-video device.

use std::convert::TryInto;
use std::fmt;

use base::linux::MemoryMappingBuilderUnix;
use base::FromRawDescriptor;
use base::IntoRawDescriptor;
use base::MemoryMappingArena;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::SafeDescriptor;
use thiserror::Error as ThisError;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use vm_memory::GuestMemoryError;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::virtio::resource_bridge;
use crate::virtio::resource_bridge::ResourceBridgeError;
use crate::virtio::resource_bridge::ResourceInfo;
use crate::virtio::resource_bridge::ResourceRequest;
use crate::virtio::video::format::Format;
use crate::virtio::video::format::FramePlane;
use crate::virtio::video::params::Params;
use crate::virtio::video::protocol::virtio_video_mem_entry;
use crate::virtio::video::protocol::virtio_video_object_entry;

/// Defines how resources for a given queue are represented.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ResourceType {
    /// Resources are backed by guest memory pages.
    GuestPages,
    /// Resources are backed by virtio objects.
    #[default]
    VirtioObject,
}

#[repr(C)]
#[derive(Clone, Copy, AsBytes, FromZeroes, FromBytes)]
/// A guest resource entry whose type has not been decided yet.
pub union UnresolvedResourceEntry {
    pub object: virtio_video_object_entry,
    pub guest_mem: virtio_video_mem_entry,
}

impl fmt::Debug for UnresolvedResourceEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "unresolved {:?} or {:?}",
            // SAFETY:
            // Safe because `self.object` and `self.guest_mem` are the same size and both made of
            // integers, making it safe to display them no matter their value.
            unsafe { self.object },
            // SAFETY:
            // Safe because `self.object` and `self.guest_mem` are the same size and both made of
            // integers, making it safe to display them no matter their value.
            unsafe { self.guest_mem }
        )
    }
}

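// The sketch below is an illustration added for this document, not part of the original file:
// it shows how a queue that knows its `ResourceType` might resolve an `UnresolvedResourceEntry`
// into a `GuestResource` by dispatching to the constructors defined further down. The function
// name and the `String` error type are hypothetical.
#[allow(dead_code)]
fn resolve_resource_entry_sketch(
    entry: &UnresolvedResourceEntry,
    res_type: ResourceType,
    mem: &GuestMemory,
    res_bridge: &base::Tube,
    params: &Params,
) -> Result<GuestResource, String> {
    match res_type {
        ResourceType::GuestPages => {
            // SAFETY: as in the `Debug` impl above, both union members are plain integers, so
            // reading `guest_mem` is sound regardless of which member was written.
            let mem_entry = unsafe { entry.guest_mem };
            GuestResource::from_virtio_guest_mem_entry(&[mem_entry], mem, params)
                .map_err(|e| e.to_string())
        }
        ResourceType::VirtioObject => {
            // SAFETY: same reasoning as above, this time for `object`.
            let object_entry = unsafe { entry.object };
            GuestResource::from_virtio_object_entry(object_entry, res_bridge, params)
                .map_err(|e| e.to_string())
        }
    }
}
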
/// Trait for types that can serve as video buffer backing memory.
pub trait BufferHandle: Sized {
    /// Try to clone this handle. This must only create a new reference to the same backing memory
    /// and not duplicate the buffer itself.
    fn try_clone(&self) -> Result<Self, base::Error>;

    /// Returns a linear mapping of [`offset`..`offset`+`size`] of the memory backing this buffer.
    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError>;
}

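// Illustrative usage sketch (added for this document, not in the original file): copy the first
// `len` bytes of any `BufferHandle`'s backing memory into a `Vec` through the linear mapping
// returned by `get_mapping`. This mirrors the access pattern used by the tests at the bottom of
// this file; the function name is hypothetical.
#[allow(dead_code)]
fn read_buffer_prefix_sketch<H: BufferHandle>(
    handle: &H,
    len: usize,
) -> Result<Vec<u8>, MmapError> {
    use base::MappedRegion;

    let mapping = handle.get_mapping(0, len)?;
    let mut data = vec![0u8; len];
    // SAFETY: `mapping` is a valid linear mapping of at least `len` bytes, and `data` is a
    // freshly allocated buffer of the same length.
    unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
    Ok(data)
}
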
/// Linear memory area of a `GuestMemHandle`.
#[derive(Clone)]
pub struct GuestMemArea {
    /// Offset within the guest region to the start of the area.
    pub offset: u64,
    /// Length of the area within the memory region.
    pub length: usize,
}

pub struct GuestMemHandle {
    /// Descriptor to the guest memory region containing the buffer.
    pub desc: SafeDescriptor,
    /// Memory areas (i.e. sg list) that make up the memory buffer.
    pub mem_areas: Vec<GuestMemArea>,
}

impl BufferHandle for GuestMemHandle {
    fn try_clone(&self) -> Result<Self, base::Error> {
        Ok(Self {
            desc: self.desc.try_clone()?,
            mem_areas: self.mem_areas.clone(),
        })
    }

    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
        let mut arena = MemoryMappingArena::new(size)?;
        let mut mapped_size = 0;
        let mut area_iter = self.mem_areas.iter();
        let mut area_offset = offset;
        while mapped_size < size {
            // Take the next area of the sg list, or fail if we run out of areas before `size`
            // bytes have been mapped.
            let area = match area_iter.next() {
                Some(area) => area,
                None => {
                    return Err(MmapError::InvalidRange(
                        offset,
                        size,
                        self.mem_areas.iter().map(|a| a.length).sum(),
                    ));
                }
            };
            if area_offset > area.length {
                // The requested offset lies beyond this area; skip it entirely.
                area_offset -= area.length;
            } else {
                // Map as much of this area as is needed to cover the remaining requested size.
                let mapping_length = std::cmp::min(area.length - area_offset, size - mapped_size);
                arena.add_fd_offset(mapped_size, mapping_length, &self.desc, area.offset)?;
                mapped_size += mapping_length;
                area_offset = 0;
            }
        }
        Ok(arena)
    }
}

pub struct VirtioObjectHandle {
    /// Descriptor for the object.
    pub desc: SafeDescriptor,
    /// Modifier to apply to frame resources.
    pub modifier: u64,
}

impl BufferHandle for VirtioObjectHandle {
    fn try_clone(&self) -> Result<Self, base::Error> {
        Ok(Self {
            desc: self.desc.try_clone()?,
            modifier: self.modifier,
        })
    }

    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
        MemoryMappingBuilder::new(size)
            .from_descriptor(&self.desc)
            .offset(offset as u64)
            .build()
            .map(MemoryMappingArena::from)
    }
}

pub enum GuestResourceHandle {
    GuestPages(GuestMemHandle),
    VirtioObject(VirtioObjectHandle),
}

impl BufferHandle for GuestResourceHandle {
    fn try_clone(&self) -> Result<Self, base::Error> {
        Ok(match self {
            Self::GuestPages(handle) => Self::GuestPages(handle.try_clone()?),
            Self::VirtioObject(handle) => Self::VirtioObject(handle.try_clone()?),
        })
    }

    fn get_mapping(&self, offset: usize, size: usize) -> Result<MemoryMappingArena, MmapError> {
        match self {
            GuestResourceHandle::GuestPages(handle) => handle.get_mapping(offset, size),
            GuestResourceHandle::VirtioObject(handle) => handle.get_mapping(offset, size),
        }
    }
}

pub struct GuestResource {
    /// Handle to the backing memory.
    pub handle: GuestResourceHandle,
    /// Layout of color planes, if the resource will receive frames.
    pub planes: Vec<FramePlane>,
    pub width: u32,
    pub height: u32,
    pub format: Format,
    /// Whether the buffer can be accessed by the guest CPU. This means the host must ensure that
    /// all operations on the buffer are completed before passing it to the guest.
    pub guest_cpu_mappable: bool,
}

#[derive(Debug, ThisError)]
pub enum GuestMemResourceCreationError {
    #[error("provided slice of entries is empty")]
    NoEntriesProvided,
    #[error("cannot get shm region: {0}")]
    CantGetShmRegion(GuestMemoryError),
    #[error("cannot get shm offset: {0}")]
    CantGetShmOffset(GuestMemoryError),
    #[error("error while cloning shm region descriptor: {0}")]
    DescriptorCloneError(base::Error),
}

#[derive(Debug, ThisError)]
pub enum ObjectResourceCreationError {
    #[error("uuid {0:08} is larger than 32 bits")]
    UuidNot32Bits(u128),
    #[error("resource returned by bridge is not a buffer")]
    NotABuffer,
    #[error("resource bridge failure: {0}")]
    ResourceBridgeFailure(ResourceBridgeError),
}

impl GuestResource {
    /// Try to convert an unresolved virtio guest memory entry into a resolved guest memory
    /// resource.
    ///
    /// Convert `mem_entries` into the guest memory resource they represent and resolve them
    /// through `mem`.
    /// Width, height and format are set from `params`.
    ///
    /// Panics if `params.format` is `None`.
    pub fn from_virtio_guest_mem_entry(
        mem_entries: &[virtio_video_mem_entry],
        mem: &GuestMemory,
        params: &Params,
    ) -> Result<GuestResource, GuestMemResourceCreationError> {
        let region_desc = match mem_entries.first() {
            None => return Err(GuestMemResourceCreationError::NoEntriesProvided),
            Some(entry) => {
                let addr: u64 = entry.addr.into();

                let guest_region = mem
                    .shm_region(GuestAddress(addr))
                    .map_err(GuestMemResourceCreationError::CantGetShmRegion)?;
                base::clone_descriptor(guest_region)
                    .map_err(GuestMemResourceCreationError::DescriptorCloneError)?
            }
        };

        let mem_areas = mem_entries
            .iter()
            .map(|entry| {
                let addr: u64 = entry.addr.into();
                let length: u32 = entry.length.into();
                let region_offset = mem
                    .offset_from_base(GuestAddress(addr))
                    .map_err(GuestMemResourceCreationError::CantGetShmOffset)
                    .unwrap();

                GuestMemArea {
                    offset: region_offset,
                    length: length as usize,
                }
            })
            .collect();

        let handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            desc: region_desc,
            mem_areas,
        });

        // The plane information can be computed from the currently set format.
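        // Worked example (illustration added for this document): for a hypothetical NV12
        // 640x480 frame, `params.plane_formats` would describe a Y plane of size 0x4b000 and a
        // UV plane of size 0x25800, so the loop below yields planes at offsets 0 and 0x4b000,
        // i.e. the planes are assumed to be packed contiguously within the buffer.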
        let mut buffer_offset = 0;
        let planes = params
            .plane_formats
            .iter()
            .map(|p| {
                let plane_offset = buffer_offset;
                buffer_offset += p.plane_size;

                FramePlane {
                    offset: plane_offset as usize,
                    stride: p.stride as usize,
                    size: p.plane_size as usize,
                }
            })
            .collect();

        Ok(GuestResource {
            handle,
            planes,
            width: params.frame_width,
            height: params.frame_height,
            format: params.format.unwrap(),
            guest_cpu_mappable: true,
        })
    }

    /// Try to convert an unresolved virtio object entry into a resolved object resource.
    ///
    /// Convert `object` into the object resource it represents and resolve it through `res_bridge`.
    /// Returns an error if the object's UUID is invalid or cannot be resolved to a buffer object
    /// by `res_bridge`.
    pub fn from_virtio_object_entry(
        object: virtio_video_object_entry,
        res_bridge: &base::Tube,
        params: &Params,
    ) -> Result<GuestResource, ObjectResourceCreationError> {
        // We trust that the caller has chosen the correct object type.
        let uuid = u128::from_be_bytes(object.uuid);

        // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
        // 32-bit resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
        // this conversion.
        let handle = TryInto::<u32>::try_into(uuid)
            .map_err(|_| ObjectResourceCreationError::UuidNot32Bits(uuid))?;

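        // Illustration (added for this document): a virtio-gpu resource handle of e.g. 42
        // arrives here as a 16-byte big-endian UUID whose low 32 bits are 0x2a, and is truncated
        // back to the `u32` handle 42 above before being looked up through the resource bridge.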
        let buffer_info = match resource_bridge::get_resource_info(
            res_bridge,
            ResourceRequest::GetBuffer { id: handle },
        ) {
            Ok(ResourceInfo::Buffer(buffer_info)) => buffer_info,
            Ok(_) => return Err(ObjectResourceCreationError::NotABuffer),
            Err(e) => return Err(ObjectResourceCreationError::ResourceBridgeFailure(e)),
        };

        let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle {
            // SAFETY:
            // Safe because `buffer_info.handle` is a valid file descriptor and we are stealing
            // it.
            desc: unsafe {
                SafeDescriptor::from_raw_descriptor(buffer_info.handle.into_raw_descriptor())
            },
            modifier: buffer_info.modifier,
        });

        // TODO(ishitatsuyuki): Right now, there are two sources of metadata: through the
        //                      virtio_video_params fields, or through the buffer metadata provided
        //                      by the VirtioObject backend.
        //                      Unfortunately neither is sufficient. The virtio_video_params struct
        //                      lacks the plane offset, while some virtio-gpu backends don't
        //                      have information about the plane size, or in some cases even the
        //                      overall frame width and height.
        //                      We will mix and match metadata from the more reliable data source
        //                      below; ideally this should be fixed to use a single source of truth.
        let planes = params
            .plane_formats
            .iter()
            .zip(&buffer_info.planes)
            .map(|(param, buffer)| FramePlane {
                // When the virtio object backend was implemented, the offset and stride were
                // sourced from the object backend's metadata (`buffer`). To stay on the safe side,
                // we keep using data from `buffer`, even though the stride is also provided by
                // `param`.
                offset: buffer.offset as usize,
                stride: buffer.stride as usize,
                size: param.plane_size as usize,
            })
            .collect();

        Ok(GuestResource {
            handle,
            planes,
            width: params.frame_width,
            height: params.frame_height,
            format: params.format.unwrap(),
            guest_cpu_mappable: buffer_info.guest_cpu_mappable,
        })
    }

    #[cfg(feature = "video-encoder")]
    pub fn try_clone(&self) -> Result<Self, base::Error> {
        Ok(Self {
            handle: self.handle.try_clone()?,
            planes: self.planes.clone(),
            width: self.width,
            height: self.height,
            format: self.format,
            guest_cpu_mappable: self.guest_cpu_mappable,
        })
    }
}

#[cfg(test)]
mod tests {
    use base::MappedRegion;
    use base::SharedMemory;

    use super::*;

    /// Creates a sparse guest memory handle using as many pages as there are entries in
    /// `page_order`. The page with index `0` will be the first page, `1` will be the second page,
    /// etc.
    ///
    /// The memory handle is filled with increasing u32s starting from page 0, then page 1, and so
    /// on. Finally the handle is mapped into a linear space and we check that the written integers
    /// appear in the expected order.
    fn check_guest_mem_handle(page_order: &[usize]) {
        const PAGE_SIZE: usize = 0x1000;
        const U32_SIZE: usize = std::mem::size_of::<u32>();
        const ENTRIES_PER_PAGE: usize = PAGE_SIZE / std::mem::size_of::<u32>();

        // Fill a vector of the same size as the handle with u32s of increasing value, following
        // the page layout given as argument.
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        for (page_index, page) in page_order.iter().enumerate() {
            let page_slice = &mut data[(page * PAGE_SIZE)..((page + 1) * PAGE_SIZE)];
            for (index, chunk) in page_slice.chunks_exact_mut(4).enumerate() {
                let sized_chunk: &mut [u8; 4] = chunk.try_into().unwrap();
                *sized_chunk = (((page_index * ENTRIES_PER_PAGE) + index) as u32).to_ne_bytes();
            }
        }

        // Copy the initialized vector's content into an anonymous shared memory.
        let mem = SharedMemory::new("data-dest", data.len() as u64).unwrap();
        let mapping = MemoryMappingBuilder::new(mem.size() as usize)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        // Create the `GuestMemHandle` we will try to map and retrieve the data from.
        let mem_handle = GuestResourceHandle::GuestPages(GuestMemHandle {
            desc: base::clone_descriptor(&mem).unwrap(),
            mem_areas: page_order
                .iter()
                .map(|&page| GuestMemArea {
                    offset: page as u64 * PAGE_SIZE as u64,
                    length: PAGE_SIZE,
                })
                .collect(),
        });

        // Map the handle into a linear memory area, retrieve its data into a new vector, and check
        // that its u32s appear to increase linearly.
        let mapping = mem_handle.get_mapping(0, mem.size() as usize).unwrap();
        let mut data = vec![0u8; PAGE_SIZE * page_order.len()];
        // SAFETY: src and dst are valid and aligned
        unsafe { std::ptr::copy_nonoverlapping(mapping.as_ptr(), data.as_mut_ptr(), data.len()) };
        for (index, chunk) in data.chunks_exact(U32_SIZE).enumerate() {
            let sized_chunk: &[u8; 4] = chunk.try_into().unwrap();
            assert_eq!(u32::from_ne_bytes(*sized_chunk), index as u32);
        }
    }

    // Fill a guest memory handle with a single memory page.
    // Then check that the data can be properly mapped and appears in the expected order.
    #[test]
    fn test_single_guest_mem_handle() {
        check_guest_mem_handle(&[0])
    }

    // Fill a guest memory handle with 4 memory pages that are contiguous.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_linear_guest_mem_handle() {
        check_guest_mem_handle(&[0, 1, 2, 3])
    }

    // Fill a guest memory handle with 8 pages mapped in non-linear order.
    // Then check that the pages appear in the expected order in the mapping.
    #[test]
    fn test_sparse_guest_mem_handle() {
        check_guest_mem_handle(&[1, 7, 6, 3, 5, 0, 4, 2])
    }
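
    // Additional illustrative test (added for this document, not part of the original file):
    // check that `VirtioObjectHandle::get_mapping` can map a plain shared memory descriptor and
    // expose data written through another mapping. A real virtio object would typically be a
    // dma-buf, but any mappable descriptor works for this sketch.
    #[test]
    fn test_virtio_object_handle_mapping_sketch() {
        let data: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
        let mem = SharedMemory::new("object-data", 0x1000).unwrap();
        let mapping = MemoryMappingBuilder::new(0x1000)
            .from_shared_memory(&mem)
            .build()
            .unwrap();
        assert_eq!(mapping.write_slice(&data, 0).unwrap(), data.len());

        let handle = GuestResourceHandle::VirtioObject(VirtioObjectHandle {
            desc: base::clone_descriptor(&mem).unwrap(),
            modifier: 0,
        });

        // Map the object and read the data back through the linear mapping.
        let mapping = handle.get_mapping(0, 0x1000).unwrap();
        let mut read_back = [0u8; 8];
        // SAFETY: `mapping` is at least `read_back.len()` bytes long and both pointers are valid.
        unsafe {
            std::ptr::copy_nonoverlapping(mapping.as_ptr(), read_back.as_mut_ptr(), read_back.len())
        };
        assert_eq!(read_back, data);
    }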
}