xref: /aosp_15_r20/external/crosvm/devices/src/virtio/video/decoder/mod.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Implementation of a virtio video decoder backed by a device.
6 
7 use std::collections::btree_map::Entry;
8 use std::collections::BTreeMap;
9 use std::collections::BTreeSet;
10 use std::collections::VecDeque;
11 
12 use backend::*;
13 use base::error;
14 use base::AsRawDescriptor;
15 use base::Descriptor;
16 use base::SafeDescriptor;
17 use base::Tube;
18 use base::WaitContext;
19 use vm_memory::GuestMemory;
20 
21 use crate::virtio::video::async_cmd_desc_map::AsyncCmdDescMap;
22 use crate::virtio::video::command::QueueType;
23 use crate::virtio::video::command::VideoCmd;
24 use crate::virtio::video::control::CtrlType;
25 use crate::virtio::video::control::CtrlVal;
26 use crate::virtio::video::control::QueryCtrlType;
27 use crate::virtio::video::device::*;
28 use crate::virtio::video::error::*;
29 use crate::virtio::video::event::*;
30 use crate::virtio::video::format::*;
31 use crate::virtio::video::params::Params;
32 use crate::virtio::video::protocol;
33 use crate::virtio::video::resource::*;
34 use crate::virtio::video::response::CmdResponse;
35 
36 pub mod backend;
37 mod capability;
38 
39 use capability::*;
40 
// Identifies one stream (i.e. one decoder session) in virtio-video commands.
type StreamId = u32;
// Identifies a resource within a stream's input or output queue.
type ResourceId = u32;

// ResourceId given by the driver
type InputResourceId = u32;
type OutputResourceId = u32;

// Id for a frame buffer passed to Chrome.
// We cannot use OutputResourceId as is because this ID must be between 0 and ((# of buffers) - 1).
//
// TODO(b/1518105): Once we decide to generate resource_id in the device side,
// we don't need this value and can pass OutputResourceId to Chrome directly.
type FrameBufferId = i32;
54 
/// The result of `OutputResources::queue_resource()`.
enum QueueOutputResourceResult {
    /// The resource is kept back as the EOS buffer and must not be sent to the host.
    UsingAsEos,
    /// The resource has been registered before; its existing frame buffer ID is reused.
    Reused(FrameBufferId),
    /// The resource is queued for the first time and was assigned a new frame buffer ID.
    Registered(FrameBufferId),
}
61 
/// An input (bitstream) resource registered by the guest.
struct InputResource {
    /// The actual underlying resource.
    resource: GuestResource,
    /// Offset from `resource` from which data starts.
    offset: u32,
}

/// Maps an input resource ID to the underlying resource and its useful information.
type InputResources = BTreeMap<InputResourceId, InputResource>;
71 
/// Bookkeeping for the output (frame) resources of one stream.
#[derive(Default)]
struct OutputResources {
    // Bidirectional mapping OutputResourceId <-> FrameBufferId.
    res_id_to_frame_buf_id: BTreeMap<OutputResourceId, FrameBufferId>,
    frame_buf_id_to_res_id: BTreeMap<FrameBufferId, OutputResourceId>,

    // Store the resource id of the queued output buffers.
    queued_res_ids: BTreeSet<OutputResourceId>,

    // Reserves output resource ID that will be used to notify EOS.
    // If a guest enqueues a resource with this ID, the resource must not be sent to the host.
    // Once the value is set, it won't be changed until resolution is changed or a stream is
    // destroyed.
    eos_resource_id: Option<OutputResourceId>,

    // This is a flag that shows whether the device's set_output_parameters has called.
    // This will be set to true when ResourceQueue for OutputBuffer is called for the first time.
    //
    // TODO(b/1518105): This field is added as a hack because the current virtio-video v3 spec
    // doesn't have a way to send a number of frame buffers the guest provides.
    // Once we have the way in the virtio-video protocol, we should remove this flag.
    output_params_set: bool,

    // OutputResourceId -> resource handle, held until the resource is first queued and the
    // handle's ownership moves into the backend session.
    res_id_to_res_handle: BTreeMap<OutputResourceId, GuestResource>,

    // Maps the resource_id of an output buffer to its descriptor, for output buffers that may be
    // accessed by the guest CPU which we need to poll for completion before passing to the guest.
    res_id_to_descriptor: BTreeMap<OutputResourceId, SafeDescriptor>,
}
102 
103 impl OutputResources {
queue_resource( &mut self, resource_id: OutputResourceId, ) -> VideoResult<QueueOutputResourceResult>104     fn queue_resource(
105         &mut self,
106         resource_id: OutputResourceId,
107     ) -> VideoResult<QueueOutputResourceResult> {
108         if !self.queued_res_ids.insert(resource_id) {
109             error!("resource_id {} is already queued", resource_id);
110             return Err(VideoError::InvalidParameter);
111         }
112 
113         // Stores an output buffer to notify EOS.
114         // This is necessary because libvda is unable to indicate EOS along with returned buffers.
115         // For now, when a `Flush()` completes, this saved resource will be returned as a zero-sized
116         // buffer with the EOS flag.
117         // TODO(b/149725148): Remove this when libvda supports buffer flags.
118         if *self.eos_resource_id.get_or_insert(resource_id) == resource_id {
119             return Ok(QueueOutputResourceResult::UsingAsEos);
120         }
121 
122         Ok(match self.res_id_to_frame_buf_id.entry(resource_id) {
123             Entry::Occupied(e) => QueueOutputResourceResult::Reused(*e.get()),
124             Entry::Vacant(_) => {
125                 let buffer_id = self.res_id_to_frame_buf_id.len() as FrameBufferId;
126                 self.res_id_to_frame_buf_id.insert(resource_id, buffer_id);
127                 self.frame_buf_id_to_res_id.insert(buffer_id, resource_id);
128                 QueueOutputResourceResult::Registered(buffer_id)
129             }
130         })
131     }
132 
dequeue_frame_buffer( &mut self, buffer_id: FrameBufferId, stream_id: StreamId, ) -> Option<ResourceId>133     fn dequeue_frame_buffer(
134         &mut self,
135         buffer_id: FrameBufferId,
136         stream_id: StreamId,
137     ) -> Option<ResourceId> {
138         let resource_id = match self.frame_buf_id_to_res_id.get(&buffer_id) {
139             Some(id) => *id,
140             None => {
141                 error!(
142                     "unknown frame buffer id {} for stream {}",
143                     buffer_id, stream_id
144                 );
145                 return None;
146             }
147         };
148 
149         self.queued_res_ids.take(&resource_id).or_else(|| {
150             error!(
151                 "resource_id {} is not enqueued for stream {}",
152                 resource_id, stream_id
153             );
154             None
155         })
156     }
157 
dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId>158     fn dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId> {
159         self.queued_res_ids.take(&self.eos_resource_id?)
160     }
161 
output_params_set(&mut self) -> bool162     fn output_params_set(&mut self) -> bool {
163         if !self.output_params_set {
164             self.output_params_set = true;
165             return true;
166         }
167         false
168     }
169 }
170 
/// A response to the guest that is waiting to be emitted, in FIFO order.
enum PendingResponse {
    /// A decoded picture is ready to be returned for the given frame buffer.
    PictureReady {
        picture_buffer_id: i32,
        timestamp: u64,
    },
    /// A flush finished; the reserved EOS buffer and the pending `Drain` must be completed.
    FlushCompleted,
    // Signals that we need to block on the `Descriptor` before processing further events.
    BufferBarrier(Descriptor),
    // Signals that we are currently blocking on the `Descriptor`.
    PollingBufferBarrier(Descriptor),
}
182 
// Context is associated with one `DecoderSession`, which corresponds to one stream from the
// virtio-video's point of view.
struct Context<S: DecoderSession> {
    // ID of the stream this context belongs to.
    stream_id: StreamId,

    // Current parameters of the input (bitstream) queue.
    in_params: Params,
    // Current parameters of the output (frame) queue.
    out_params: Params,

    // Input resources registered for this stream.
    in_res: InputResources,
    // Output resources and their frame-buffer bookkeeping.
    out_res: OutputResources,

    // Set the flag when we ask the decoder reset, and unset when the reset is done.
    is_resetting: bool,

    // Responses waiting to be sent to the guest; a buffer-barrier entry suspends processing
    // until its descriptor signals.
    pending_responses: VecDeque<PendingResponse>,

    // Backend session; `None` until the first `ResourceCreate` triggers its creation.
    session: Option<S>,
}
201 
impl<S: DecoderSession> Context<S> {
    /// Creates a context for a stream decoding `format`, with 640x480 default parameters and an
    /// NV12 output format. The backend session is attached later, at the first `ResourceCreate`.
    fn new(
        stream_id: StreamId,
        format: Format,
        in_resource_type: ResourceType,
        out_resource_type: ResourceType,
    ) -> Self {
        const DEFAULT_WIDTH: u32 = 640;
        const DEFAULT_HEIGHT: u32 = 480;
        const DEFAULT_INPUT_BUFFER_SIZE: u32 = 1024 * 1024;

        // Output is always NV12 (see `out_params.format` below), so compute its plane layout.
        let out_plane_formats =
            PlaneFormat::get_plane_layout(Format::NV12, DEFAULT_WIDTH, DEFAULT_HEIGHT).unwrap();

        Context {
            stream_id,
            in_params: Params {
                format: Some(format),
                frame_width: DEFAULT_WIDTH,
                frame_height: DEFAULT_HEIGHT,
                resource_type: in_resource_type,
                min_buffers: 1,
                max_buffers: 32,
                plane_formats: vec![PlaneFormat {
                    plane_size: DEFAULT_INPUT_BUFFER_SIZE,
                    ..Default::default()
                }],
                ..Default::default()
            },
            out_params: Params {
                format: Some(Format::NV12),
                frame_width: DEFAULT_WIDTH,
                frame_height: DEFAULT_HEIGHT,
                resource_type: out_resource_type,
                plane_formats: out_plane_formats,
                ..Default::default()
            },
            in_res: Default::default(),
            out_res: Default::default(),
            is_resetting: false,
            pending_responses: Default::default(),
            session: None,
        }
    }

    /// Drains `pending_responses` into guest event responses until a buffer barrier (or the end
    /// of the queue) is reached.
    ///
    /// If the next entry is a `BufferBarrier`, its descriptor is registered with `wait_ctx` and
    /// the entry becomes `PollingBufferBarrier`, so processing resumes once the descriptor
    /// signals. If registering fails, the barrier is dropped so the queue does not stall.
    fn output_pending_responses(
        &mut self,
        wait_ctx: &WaitContext<Token>,
    ) -> Vec<VideoEvtResponseType> {
        let mut event_responses = vec![];
        while let Some(mut responses) = self.output_pending_response() {
            event_responses.append(&mut responses);
        }

        // Check whether the next response is a buffer barrier we need to poll on.
        if let Some(PendingResponse::BufferBarrier(desc)) = self.pending_responses.front() {
            // Duplicate the raw descriptor value so we can re-queue it after popping the entry.
            let desc = Descriptor(desc.as_raw_descriptor());
            self.pending_responses.pop_front();
            match wait_ctx.add(&desc, Token::BufferBarrier { id: self.stream_id }) {
                Ok(()) => self
                    .pending_responses
                    .push_front(PendingResponse::PollingBufferBarrier(desc)),
                Err(e) => {
                    error!("failed to add buffer FD to wait context, returning uncompleted buffer: {:#}", e)
                }
            }
        }

        event_responses
    }

    /// Converts the front entry of `pending_responses` into responses for the guest and pops it.
    ///
    /// Returns `None` when the queue is empty, when the front entry is a buffer barrier (which
    /// must be waited on first), or when the entry cannot complete yet (no matching queued
    /// output resource).
    fn output_pending_response(&mut self) -> Option<Vec<VideoEvtResponseType>> {
        let responses = match self.pending_responses.front()? {
            PendingResponse::BufferBarrier(_) | PendingResponse::PollingBufferBarrier(_) => {
                return None
            }
            PendingResponse::PictureReady {
                picture_buffer_id,
                timestamp,
            } => {
                // NOTE(review): if the frame buffer cannot be dequeued, this `?` returns early
                // and the entry stays at the front of the queue, blocking later responses —
                // confirm this stall is intended for the error case.
                let resource_id = self
                    .out_res
                    .dequeue_frame_buffer(*picture_buffer_id, self.stream_id)?;

                vec![VideoEvtResponseType::AsyncCmd(
                    AsyncCmdResponse::from_response(
                        AsyncCmdTag::Queue {
                            stream_id: self.stream_id,
                            queue_type: QueueType::Output,
                            resource_id,
                        },
                        CmdResponse::ResourceQueue {
                            timestamp: *timestamp,
                            // TODO(b/149725148): Set buffer flags once libvda exposes them.
                            flags: 0,
                            // `size` is only used for the encoder.
                            size: 0,
                        },
                    ),
                )]
            }
            PendingResponse::FlushCompleted => {
                // Return the reserved EOS resource as a zero-sized buffer carrying the EOS flag,
                // then complete the pending `Drain` command.
                let eos_resource_id = self.out_res.dequeue_eos_resource_id()?;
                let eos_tag = AsyncCmdTag::Queue {
                    stream_id: self.stream_id,
                    queue_type: QueueType::Output,
                    resource_id: eos_resource_id,
                };
                let eos_response = CmdResponse::ResourceQueue {
                    timestamp: 0,
                    flags: protocol::VIRTIO_VIDEO_BUFFER_FLAG_EOS,
                    size: 0,
                };
                vec![
                    VideoEvtResponseType::AsyncCmd(AsyncCmdResponse::from_response(
                        eos_tag,
                        eos_response,
                    )),
                    VideoEvtResponseType::AsyncCmd(AsyncCmdResponse::from_response(
                        AsyncCmdTag::Drain {
                            stream_id: self.stream_id,
                        },
                        CmdResponse::NoData,
                    )),
                ]
            }
        };
        // The front entry was fully handled above; drop it now.
        self.pending_responses.pop_front().unwrap();

        Some(responses)
    }

    /// Stores a newly created resource in the context. `offset` is only meaningful for input
    /// resources (start of bitstream data); output handles are kept until first queued.
    fn register_resource(
        &mut self,
        queue_type: QueueType,
        resource_id: u32,
        resource: GuestResource,
        offset: u32,
    ) {
        match queue_type {
            QueueType::Input => {
                self.in_res
                    .insert(resource_id, InputResource { resource, offset });
            }
            QueueType::Output => {
                self.out_res
                    .res_id_to_res_handle
                    .insert(resource_id, resource);
            }
        };
    }

    /*
     * Functions handling decoder events.
     */

    /// Handles the backend's "provide picture buffers" event: updates the stream's input frame
    /// size and rebuilds the output parameters for the new coded size and visible rectangle.
    fn handle_provide_picture_buffers(
        &mut self,
        min_num_buffers: u32,
        width: i32,
        height: i32,
        visible_rect: Rect,
    ) {
        // We only support NV12.
        let format = Some(Format::NV12);

        let plane_formats =
            PlaneFormat::get_plane_layout(Format::NV12, width as u32, height as u32).unwrap();

        self.in_params.frame_width = width as u32;
        self.in_params.frame_height = height as u32;

        self.out_params = Params {
            format,
            // The resource type is not changed by a provide picture buffers event.
            resource_type: self.out_params.resource_type,
            // Note that rect_width is sometimes smaller.
            frame_width: width as u32,
            frame_height: height as u32,
            // Adding 1 to `min_buffers` to reserve a resource for `eos_resource_id`.
            min_buffers: min_num_buffers + 1,
            max_buffers: 32,
            crop: Crop {
                left: visible_rect.left as u32,
                top: visible_rect.top as u32,
                width: (visible_rect.right - visible_rect.left) as u32,
                height: (visible_rect.bottom - visible_rect.top) as u32,
            },
            plane_formats,
            // No need to set `frame_rate`, as it's only for the encoder.
            ..Default::default()
        };
    }
}
396 
/// A thin wrapper of a map of contexts with error handlings.
struct ContextMap<S: DecoderSession> {
    // Stream ID -> its decoding context.
    map: BTreeMap<StreamId, Context<S>>,
}
401 
402 impl<S: DecoderSession> ContextMap<S> {
insert(&mut self, ctx: Context<S>) -> VideoResult<()>403     fn insert(&mut self, ctx: Context<S>) -> VideoResult<()> {
404         match self.map.entry(ctx.stream_id) {
405             Entry::Vacant(e) => {
406                 e.insert(ctx);
407                 Ok(())
408             }
409             Entry::Occupied(_) => {
410                 error!("session {} already exists", ctx.stream_id);
411                 Err(VideoError::InvalidStreamId(ctx.stream_id))
412             }
413         }
414     }
415 
get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>>416     fn get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>> {
417         self.map.get(stream_id).ok_or_else(|| {
418             error!("failed to get context of stream {}", *stream_id);
419             VideoError::InvalidStreamId(*stream_id)
420         })
421     }
422 
get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>>423     fn get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>> {
424         self.map.get_mut(stream_id).ok_or_else(|| {
425             error!("failed to get context of stream {}", *stream_id);
426             VideoError::InvalidStreamId(*stream_id)
427         })
428     }
429 }
430 
431 impl<S: DecoderSession> Default for ContextMap<S> {
default() -> Self432     fn default() -> Self {
433         Self {
434             map: Default::default(),
435         }
436     }
437 }
438 
/// Represents information of a decoder backed by a `DecoderBackend`.
pub struct Decoder<D: DecoderBackend> {
    // The backend that performs the actual decoding work.
    decoder: D,
    // Input/output format capabilities, queried from the backend at construction.
    capability: Capability,
    // Per-stream contexts, keyed by stream ID.
    contexts: ContextMap<D::Session>,
    // Tube used to resolve virtio-object resources with the resource broker.
    resource_bridge: Tube,
    // Guest memory, used to resolve guest-page-backed resources.
    mem: GuestMemory,
}
447 
448 impl<D: DecoderBackend> Decoder<D> {
449     /// Build a new decoder using the provided `backend`.
new(backend: D, resource_bridge: Tube, mem: GuestMemory) -> Self450     pub fn new(backend: D, resource_bridge: Tube, mem: GuestMemory) -> Self {
451         let capability = backend.get_capabilities();
452 
453         Self {
454             decoder: backend,
455             capability,
456             contexts: Default::default(),
457             resource_bridge,
458             mem,
459         }
460     }
461 
462     /*
463      * Functions processing virtio-video commands.
464      */
465 
query_capabilities(&self, queue_type: QueueType) -> CmdResponse466     fn query_capabilities(&self, queue_type: QueueType) -> CmdResponse {
467         let descs = match queue_type {
468             QueueType::Input => self.capability.input_formats().clone(),
469             QueueType::Output => self.capability.output_formats().clone(),
470         };
471 
472         CmdResponse::QueryCapability(descs)
473     }
474 
create_stream( &mut self, stream_id: StreamId, coded_format: Format, input_resource_type: ResourceType, output_resource_type: ResourceType, ) -> VideoResult<VideoCmdResponseType>475     fn create_stream(
476         &mut self,
477         stream_id: StreamId,
478         coded_format: Format,
479         input_resource_type: ResourceType,
480         output_resource_type: ResourceType,
481     ) -> VideoResult<VideoCmdResponseType> {
482         // Create an instance of `Context`.
483         // Note that the `DecoderSession` will be created not here but at the first call of
484         // `ResourceCreate`. This is because we need to fix a coded format for it, which
485         // will be set by `SetParams`.
486         self.contexts.insert(Context::new(
487             stream_id,
488             coded_format,
489             input_resource_type,
490             output_resource_type,
491         ))?;
492         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
493     }
494 
destroy_stream(&mut self, stream_id: StreamId)495     fn destroy_stream(&mut self, stream_id: StreamId) {
496         if self.contexts.map.remove(&stream_id).is_none() {
497             error!("Tried to destroy an invalid stream context {}", stream_id);
498         }
499     }
500 
create_session( decoder: &mut D, wait_ctx: &WaitContext<Token>, ctx: &Context<D::Session>, stream_id: StreamId, ) -> VideoResult<D::Session>501     fn create_session(
502         decoder: &mut D,
503         wait_ctx: &WaitContext<Token>,
504         ctx: &Context<D::Session>,
505         stream_id: StreamId,
506     ) -> VideoResult<D::Session> {
507         let format = match ctx.in_params.format {
508             Some(f) => f,
509             None => {
510                 error!("bitstream format is not specified");
511                 return Err(VideoError::InvalidParameter);
512             }
513         };
514 
515         let session = decoder.new_session(format)?;
516 
517         wait_ctx
518             .add(session.event_pipe(), Token::Event { id: stream_id })
519             .map_err(|e| {
520                 error!(
521                     "failed to add FD to poll context for session {}: {}",
522                     stream_id, e
523                 );
524                 VideoError::InvalidOperation
525             })?;
526 
527         Ok(session)
528     }
529 
    /// Handles `ResourceCreate`: resolves the guest's plane entries into a `GuestResource` and
    /// registers it with the stream's context.
    ///
    /// The backend `DecoderSession` is created lazily on the first call for a stream, since the
    /// coded format (settable via `SetParams`) is fixed by then.
    fn create_resource(
        &mut self,
        wait_ctx: &WaitContext<Token>,
        stream_id: StreamId,
        queue_type: QueueType,
        resource_id: ResourceId,
        plane_offsets: Vec<u32>,
        plane_entries: Vec<Vec<UnresolvedResourceEntry>>,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // Create a instance of `DecoderSession` at the first time `ResourceCreate` is
        // called here.
        if ctx.session.is_none() {
            ctx.session = Some(Self::create_session(
                &mut self.decoder,
                wait_ctx,
                ctx,
                stream_id,
            )?);
        }

        // We only support single-buffer resources for now.
        let entries = if plane_entries.len() != 1 {
            return Err(VideoError::InvalidArgument);
        } else {
            // unwrap() is safe because we just tested that `plane_entries` had exactly one element.
            plane_entries.first().unwrap()
        };

        // Now try to resolve our resource.
        let (resource_type, params) = match queue_type {
            QueueType::Input => (ctx.in_params.resource_type, &ctx.in_params),
            QueueType::Output => (ctx.out_params.resource_type, &ctx.out_params),
        };

        let resource = match resource_type {
            ResourceType::VirtioObject => {
                // Virtio object resources only have one entry.
                if entries.len() != 1 {
                    return Err(VideoError::InvalidArgument);
                }
                GuestResource::from_virtio_object_entry(
                    // SAFETY:
                    // Safe because we confirmed the correct type for the resource.
                    // unwrap() is also safe here because we just tested above that `entries` had
                    // exactly one element.
                    unsafe { entries.first().unwrap().object },
                    &self.resource_bridge,
                    params,
                )
                .map_err(|_| VideoError::InvalidArgument)?
            }
            ResourceType::GuestPages => GuestResource::from_virtio_guest_mem_entry(
                // SAFETY:
                // Safe because we confirmed the correct type for the resource.
                unsafe {
                    std::slice::from_raw_parts(
                        entries.as_ptr() as *const protocol::virtio_video_mem_entry,
                        entries.len(),
                    )
                },
                &self.mem,
                params,
            )
            .map_err(|_| VideoError::InvalidArgument)?,
        };

        // Only the first plane's offset is used; default to 0 when none is provided.
        let offset = plane_offsets.first().copied().unwrap_or(0);
        ctx.register_resource(queue_type, resource_id, resource, offset);

        if queue_type == QueueType::Input {
            return Ok(VideoCmdResponseType::Sync(CmdResponse::NoData));
        };

        // We assume ResourceCreate is not called to an output resource that is already
        // imported to Chrome for now.
        // TODO(keiichiw): We need to support this case for a guest client who may use
        // arbitrary numbers of buffers. (e.g. C2V4L2Component in ARCVM)
        // Such a client is valid as long as it uses at most 32 buffers at the same time.
        if let Some(frame_buf_id) = ctx.out_res.res_id_to_frame_buf_id.get(&resource_id) {
            error!(
                "resource {} has already been imported to Chrome as a frame buffer {}",
                resource_id, frame_buf_id
            );
            return Err(VideoError::InvalidOperation);
        }

        Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
    }
620 
destroy_all_resources( &mut self, stream_id: StreamId, queue_type: QueueType, ) -> VideoResult<VideoCmdResponseType>621     fn destroy_all_resources(
622         &mut self,
623         stream_id: StreamId,
624         queue_type: QueueType,
625     ) -> VideoResult<VideoCmdResponseType> {
626         let ctx = self.contexts.get_mut(&stream_id)?;
627 
628         // Reset the associated context.
629         match queue_type {
630             QueueType::Input => {
631                 ctx.in_res = Default::default();
632             }
633             QueueType::Output => {
634                 ctx.out_res = Default::default();
635             }
636         }
637         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
638     }
639 
queue_input_resource( &mut self, stream_id: StreamId, resource_id: ResourceId, timestamp: u64, data_sizes: Vec<u32>, ) -> VideoResult<VideoCmdResponseType>640     fn queue_input_resource(
641         &mut self,
642         stream_id: StreamId,
643         resource_id: ResourceId,
644         timestamp: u64,
645         data_sizes: Vec<u32>,
646     ) -> VideoResult<VideoCmdResponseType> {
647         let ctx = self.contexts.get_mut(&stream_id)?;
648 
649         if data_sizes.len() != 1 {
650             error!("num_data_sizes must be 1 but {}", data_sizes.len());
651             return Err(VideoError::InvalidOperation);
652         }
653 
654         let session = ctx.session.as_mut().ok_or(VideoError::InvalidOperation)?;
655 
656         let InputResource { resource, offset } =
657             ctx.in_res
658                 .get(&resource_id)
659                 .ok_or(VideoError::InvalidResourceId {
660                     stream_id,
661                     resource_id,
662                 })?;
663 
664         session.decode(
665             resource_id,
666             timestamp,
667             resource
668                 .handle
669                 .try_clone()
670                 .map_err(|_| VideoError::InvalidParameter)?,
671             *offset,
672             data_sizes[0], // bytes_used
673         )?;
674 
675         Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
676             stream_id,
677             queue_type: QueueType::Input,
678             resource_id,
679         }))
680     }
681 
    /// Handles `ResourceQueue` for the output queue.
    ///
    /// Depending on the resource's registration state it is either kept back as the EOS buffer,
    /// re-offered to the backend session as a reusable frame buffer, or imported into the
    /// session for the first time. Completion is reported asynchronously via a `Queue` tag.
    fn queue_output_resource(
        &mut self,
        stream_id: StreamId,
        resource_id: ResourceId,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // Check if the current pixel format is set to NV12.
        match ctx.out_params.format {
            Some(Format::NV12) => (), // OK
            Some(f) => {
                error!(
                    "video decoder only supports NV12 as a frame format, got {}",
                    f
                );
                return Err(VideoError::InvalidOperation);
            }
            None => {
                error!("output format is not set");
                return Err(VideoError::InvalidOperation);
            }
        };

        match ctx.out_res.queue_resource(resource_id)? {
            QueueOutputResourceResult::UsingAsEos => {
                // Don't enqueue this resource to the host.
                Ok(())
            }
            QueueOutputResourceResult::Reused(buffer_id) => {
                // If a picture for this buffer is still pending delivery to the guest, don't
                // hand the buffer back to the session yet; otherwise offer it for reuse.
                let res = ctx.pending_responses.iter()
                    .find(|&res| {
                        matches!(res, PendingResponse::PictureReady { picture_buffer_id, .. } if *picture_buffer_id == buffer_id)
                    });

                if res.is_some() {
                    Ok(())
                } else {
                    ctx.session
                        .as_mut()
                        .ok_or(VideoError::InvalidOperation)?
                        .reuse_output_buffer(buffer_id)
                }
            }
            QueueOutputResourceResult::Registered(buffer_id) => {
                // Take full ownership of the output resource, since we will only import it once
                // into the backend.
                let resource = ctx
                    .out_res
                    .res_id_to_res_handle
                    .remove(&resource_id)
                    .ok_or(VideoError::InvalidResourceId {
                        stream_id,
                        resource_id,
                    })?;

                let session = ctx.session.as_mut().ok_or(VideoError::InvalidOperation)?;

                // If the guest CPU may map the buffer, keep a duplicate of its descriptor so we
                // can wait on buffer completion before returning it to the guest.
                ctx.out_res.res_id_to_descriptor.remove(&resource_id);
                if resource.guest_cpu_mappable {
                    if let GuestResourceHandle::VirtioObject(VirtioObjectHandle { desc, .. }) =
                        &resource.handle
                    {
                        let desc = desc.try_clone().map_err(|e| {
                            VideoError::BackendFailure(anyhow::anyhow!(e).context(
                                "failed to clone buffer descriptor for completion barrier",
                            ))
                        })?;
                        ctx.out_res.res_id_to_descriptor.insert(resource_id, desc);
                    }
                }

                // Set output_buffer_count before passing the first output buffer.
                if ctx.out_res.output_params_set() {
                    const OUTPUT_BUFFER_COUNT: usize = 32;

                    // Set the buffer count to the maximum value.
                    // TODO(b/1518105): This is a hack due to the lack of way of telling a number of
                    // frame buffers explictly in virtio-video v3 RFC. Once we have the way,
                    // set_output_buffer_count should be called with a value passed by the guest.
                    session.set_output_parameters(OUTPUT_BUFFER_COUNT, Format::NV12)?;
                }

                session.use_output_buffer(buffer_id, resource)
            }
        }?;
        Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
            stream_id,
            queue_type: QueueType::Output,
            resource_id,
        }))
    }
773 
get_params( &self, stream_id: StreamId, queue_type: QueueType, is_ext: bool, ) -> VideoResult<VideoCmdResponseType>774     fn get_params(
775         &self,
776         stream_id: StreamId,
777         queue_type: QueueType,
778         is_ext: bool,
779     ) -> VideoResult<VideoCmdResponseType> {
780         let ctx = self.contexts.get(&stream_id)?;
781         let params = match queue_type {
782             QueueType::Input => ctx.in_params.clone(),
783             QueueType::Output => ctx.out_params.clone(),
784         };
785         Ok(VideoCmdResponseType::Sync(CmdResponse::GetParams {
786             queue_type,
787             params,
788             is_ext,
789         }))
790     }
791 
set_params( &mut self, stream_id: StreamId, queue_type: QueueType, params: Params, is_ext: bool, ) -> VideoResult<VideoCmdResponseType>792     fn set_params(
793         &mut self,
794         stream_id: StreamId,
795         queue_type: QueueType,
796         params: Params,
797         is_ext: bool,
798     ) -> VideoResult<VideoCmdResponseType> {
799         let ctx = self.contexts.get_mut(&stream_id)?;
800         match queue_type {
801             QueueType::Input => {
802                 if ctx.session.is_some() {
803                     error!("parameter for input cannot be changed once decoding started");
804                     return Err(VideoError::InvalidParameter);
805                 }
806 
807                 // Only a few parameters can be changed by the guest.
808                 ctx.in_params.format = params.format;
809                 ctx.in_params.plane_formats = params.plane_formats;
810                 // The resource type can only be changed through the SET_PARAMS_EXT command.
811                 if is_ext {
812                     ctx.in_params.resource_type = params.resource_type;
813                 }
814             }
815             QueueType::Output => {
816                 // The guest can only change the resource type of the output queue if no resource
817                 // has been imported yet.
818                 if ctx.out_res.output_params_set {
819                     error!("parameter for output cannot be changed once resources are imported");
820                     return Err(VideoError::InvalidParameter);
821                 }
822                 if is_ext {
823                     ctx.out_params.resource_type = params.resource_type;
824                 }
825             }
826         };
827         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
828     }
829 
query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType>830     fn query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType> {
831         match self.capability.query_control(&ctrl_type) {
832             Some(resp) => Ok(VideoCmdResponseType::Sync(CmdResponse::QueryControl(resp))),
833             None => {
834                 error!("querying an unsupported control: {:?}", ctrl_type);
835                 Err(VideoError::InvalidArgument)
836             }
837         }
838     }
839 
get_control( &self, stream_id: StreamId, ctrl_type: CtrlType, ) -> VideoResult<VideoCmdResponseType>840     fn get_control(
841         &self,
842         stream_id: StreamId,
843         ctrl_type: CtrlType,
844     ) -> VideoResult<VideoCmdResponseType> {
845         let ctx = self.contexts.get(&stream_id)?;
846         match ctrl_type {
847             CtrlType::Profile => {
848                 let profile = match ctx.in_params.format {
849                     Some(Format::VP8) => Profile::VP8Profile0,
850                     Some(Format::VP9) => Profile::VP9Profile0,
851                     Some(Format::H264) => Profile::H264Baseline,
852                     Some(Format::Hevc) => Profile::HevcMain,
853                     Some(f) => {
854                         error!("specified format is invalid: {}", f);
855                         return Err(VideoError::InvalidArgument);
856                     }
857                     None => {
858                         error!("bitstream format is not set");
859                         return Err(VideoError::InvalidArgument);
860                     }
861                 };
862 
863                 Ok(CtrlVal::Profile(profile))
864             }
865             CtrlType::Level => {
866                 let level = match ctx.in_params.format {
867                     Some(Format::H264) => Level::H264_1_0,
868                     Some(f) => {
869                         error!("specified format has no level: {}", f);
870                         return Err(VideoError::InvalidArgument);
871                     }
872                     None => {
873                         error!("bitstream format is not set");
874                         return Err(VideoError::InvalidArgument);
875                     }
876                 };
877 
878                 Ok(CtrlVal::Level(level))
879             }
880             t => {
881                 error!("cannot get a control value: {:?}", t);
882                 Err(VideoError::InvalidArgument)
883             }
884         }
885         .map(|ctrl_val| VideoCmdResponseType::Sync(CmdResponse::GetControl(ctrl_val)))
886     }
887 
drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType>888     fn drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType> {
889         self.contexts
890             .get_mut(&stream_id)?
891             .session
892             .as_mut()
893             .ok_or(VideoError::InvalidOperation)?
894             .flush()?;
895         Ok(VideoCmdResponseType::Async(AsyncCmdTag::Drain {
896             stream_id,
897         }))
898     }
899 
    // Handles QUEUE_CLEAR for one queue of stream `stream_id`.
    //
    // For the input queue this triggers a backend reset (completed
    // asynchronously via `ResetCompleted`) and discards all pending
    // responses, removing any buffer barriers from `wait_ctx` first so the
    // wait context no longer polls descriptors we are about to drop. For the
    // output queue it only clears the backend's output buffers and crosvm's
    // record of queued output resources, replying synchronously.
    fn clear_queue(
        &mut self,
        stream_id: StreamId,
        queue_type: QueueType,
        wait_ctx: &WaitContext<Token>,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // TODO(b/153406792): Though QUEUE_CLEAR is defined as a per-queue command in the
        // specification, the VDA's `Reset()` clears the input buffers and may (or may not) drop
        // output buffers. So, we call it only for input and resets only the crosvm's internal
        // context for output.
        // This code can be a problem when a guest application wants to reset only one queue by
        // REQBUFS(0). To handle this problem correctly, we need to make libvda expose
        // DismissPictureBuffer() method.
        match queue_type {
            QueueType::Input => {
                if let Some(session) = ctx.session.as_mut() {
                    session.reset()?;
                    // Mark the stream as resetting so events arriving before
                    // `ResetCompleted` (e.g. `PictureReady`) are dropped.
                    ctx.is_resetting = true;
                    // Remove all the buffer barriers we are waiting on.
                    for polled_barrier in ctx.pending_responses.iter_mut().filter_map(|r| {
                        if let PendingResponse::PollingBufferBarrier(desc) = r {
                            Some(desc)
                        } else {
                            None
                        }
                    }) {
                        // Failure to delete is non-fatal (the descriptor may
                        // already be gone); just warn and continue.
                        wait_ctx.delete(polled_barrier).unwrap_or_else(|e| {
                            base::warn!(
                                "failed to remove buffer barrier from wait context: {:#}",
                                e
                            )
                        });
                    }
                    ctx.pending_responses.clear();
                    // The reply is deferred until the backend reports
                    // `ResetCompleted`.
                    Ok(VideoCmdResponseType::Async(AsyncCmdTag::Clear {
                        stream_id,
                        queue_type: QueueType::Input,
                    }))
                } else {
                    // No session yet: nothing to reset, reply immediately.
                    Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
                }
            }
            QueueType::Output => {
                if let Some(session) = ctx.session.as_mut() {
                    session.clear_output_buffers()?;
                    ctx.out_res.queued_res_ids.clear();
                }
                Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
            }
        }
    }
953 }
954 
impl<D: DecoderBackend> Device for Decoder<D> {
    // Dispatches one virtio-video command to the matching handler and turns
    // any error into a synchronous error response for the guest.
    //
    // The second element of the returned tuple optionally carries extra event
    // responses (stream id + responses) that became deliverable as a side
    // effect of the command — currently only when queueing an output resource
    // unblocks pending responses.
    fn process_cmd(
        &mut self,
        cmd: VideoCmd,
        wait_ctx: &WaitContext<Token>,
    ) -> (
        VideoCmdResponseType,
        Option<(u32, Vec<VideoEvtResponseType>)>,
    ) {
        use VideoCmd::*;
        use VideoCmdResponseType::Sync;

        let mut event_ret = None;
        let cmd_response = match cmd {
            QueryCapability { queue_type } => Ok(Sync(self.query_capabilities(queue_type))),
            StreamCreate {
                stream_id,
                coded_format,
                input_resource_type,
                output_resource_type,
            } => self.create_stream(
                stream_id,
                coded_format,
                input_resource_type,
                output_resource_type,
            ),
            StreamDestroy { stream_id } => {
                self.destroy_stream(stream_id);
                Ok(Sync(CmdResponse::NoData))
            }
            ResourceCreate {
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                plane_entries,
            } => self.create_resource(
                wait_ctx,
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                plane_entries,
            ),
            ResourceDestroyAll {
                stream_id,
                queue_type,
            } => self.destroy_all_resources(stream_id, queue_type),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Input,
                resource_id,
                timestamp,
                data_sizes,
            } => self.queue_input_resource(stream_id, resource_id, timestamp, data_sizes),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Output,
                resource_id,
                ..
            } => {
                let resp = self.queue_output_resource(stream_id, resource_id);
                if resp.is_ok() {
                    // A freshly queued output buffer may allow pending
                    // responses (e.g. decoded frames waiting for a buffer) to
                    // be delivered; collect them as event responses.
                    if let Ok(ctx) = self.contexts.get_mut(&stream_id) {
                        event_ret = Some((stream_id, ctx.output_pending_responses(wait_ctx)));
                    }
                }
                resp
            }
            GetParams {
                stream_id,
                queue_type,
                is_ext,
            } => self.get_params(stream_id, queue_type, is_ext),
            SetParams {
                stream_id,
                queue_type,
                params,
                is_ext,
            } => self.set_params(stream_id, queue_type, params, is_ext),
            QueryControl { query_ctrl_type } => self.query_control(query_ctrl_type),
            GetControl {
                stream_id,
                ctrl_type,
            } => self.get_control(stream_id, ctrl_type),
            SetControl { .. } => {
                error!("SET_CONTROL is not allowed for decoder");
                Err(VideoError::InvalidOperation)
            }
            StreamDrain { stream_id } => self.drain_stream(stream_id),
            QueueClear {
                stream_id,
                queue_type,
            } => self.clear_queue(stream_id, queue_type, wait_ctx),
        };

        // Errors never propagate to the caller: they are converted into a
        // synchronous error response for the guest.
        let cmd_ret = match cmd_response {
            Ok(r) => r,
            Err(e) => {
                error!("returning error response: {}", &e);
                Sync(e.into())
            }
        };
        (cmd_ret, event_ret)
    }

    // Reads one event from the backend session of `stream_id` and translates
    // it into zero or more responses for the guest.
    //
    // `desc_map` is used to cancel in-flight async commands when a reset
    // completes. Returns `None` on any lookup/read failure (the error is
    // logged); see the TODO below about surfacing errors properly.
    fn process_event(
        &mut self,
        desc_map: &mut AsyncCmdDescMap,
        stream_id: u32,
        wait_ctx: &WaitContext<Token>,
    ) -> Option<Vec<VideoEvtResponseType>> {
        // TODO(b/161774071): Switch the return value from Option to VideoResult or another
        // result that would allow us to return an error to the caller.

        use crate::virtio::video::device::VideoEvtResponseType::*;

        let ctx = match self.contexts.get_mut(&stream_id) {
            Ok(ctx) => ctx,
            Err(e) => {
                error!("failed to get a context for session {}: {}", stream_id, e);
                return None;
            }
        };

        let session = match ctx.session.as_mut() {
            Some(s) => s,
            None => {
                error!("session not yet created for context {}", stream_id);
                return None;
            }
        };

        let event = match session.read_event() {
            Ok(event) => event,
            Err(e) => {
                error!("failed to read an event from session {}: {}", stream_id, e);
                return None;
            }
        };

        let event_responses = match event {
            // Backend requests output buffers: record the new format and tell
            // the guest the decoded resolution changed.
            DecoderEvent::ProvidePictureBuffers {
                min_num_buffers,
                width,
                height,
                visible_rect,
            } => {
                ctx.handle_provide_picture_buffers(min_num_buffers, width, height, visible_rect);
                vec![Event(VideoEvt {
                    typ: EvtType::DecResChanged,
                    stream_id,
                })]
            }
            // A frame is decoded. Dropped while a reset is in flight;
            // otherwise queued as a pending response, optionally preceded by
            // a buffer barrier when the resource has a completion descriptor.
            DecoderEvent::PictureReady {
                picture_buffer_id,
                timestamp,
                ..
            } => {
                if ctx.is_resetting {
                    vec![]
                } else {
                    // Do we need to wait for processing on the buffer to be completed before
                    // passing it to the guest? If so add a barrier to our pending events.
                    if let Some(desc) = ctx
                        .out_res
                        .frame_buf_id_to_res_id
                        .get(&picture_buffer_id)
                        .and_then(|res_id| ctx.out_res.res_id_to_descriptor.get(res_id))
                    {
                        let desc = Descriptor(desc.as_raw_descriptor());
                        ctx.pending_responses
                            .push_back(PendingResponse::BufferBarrier(desc));
                    }
                    ctx.pending_responses
                        .push_back(PendingResponse::PictureReady {
                            picture_buffer_id,
                            timestamp,
                        });
                    ctx.output_pending_responses(wait_ctx)
                }
            }
            // The backend consumed an input buffer: complete the matching
            // async ResourceQueue command.
            DecoderEvent::NotifyEndOfBitstreamBuffer(resource_id) => {
                let async_response = AsyncCmdResponse::from_response(
                    AsyncCmdTag::Queue {
                        stream_id,
                        queue_type: QueueType::Input,
                        resource_id,
                    },
                    CmdResponse::ResourceQueue {
                        timestamp: 0, // ignored for bitstream buffers.
                        flags: 0,     // no flag is raised, as it's returned successfully.
                        size: 0,      // this field is only for encoder
                    },
                );
                vec![AsyncCmd(async_response)]
            }
            // Flush finished: queue the completion (delivered in order with
            // other pending responses) or fail the pending Drain command.
            DecoderEvent::FlushCompleted(flush_result) => {
                match flush_result {
                    Ok(()) => {
                        ctx.pending_responses
                            .push_back(PendingResponse::FlushCompleted);
                        ctx.output_pending_responses(wait_ctx)
                    }
                    Err(error) => {
                        // TODO(b/151810591): If `resp` is `libvda::decode::Response::Canceled`,
                        // we should notify it to the driver in some way.
                        error!(
                            "failed to 'Flush' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(
                            AsyncCmdTag::Drain { stream_id },
                            error,
                        ))]
                    }
                }
            }
            // Reset finished: cancel all outstanding input-queue commands and
            // complete (or fail) the pending Clear command.
            DecoderEvent::ResetCompleted(reset_result) => {
                ctx.is_resetting = false;
                let tag = AsyncCmdTag::Clear {
                    stream_id,
                    queue_type: QueueType::Input,
                };
                match reset_result {
                    Ok(()) => {
                        let mut responses: Vec<_> = desc_map
                            .create_cancellation_responses(
                                &stream_id,
                                Some(QueueType::Input),
                                Some(tag),
                            )
                            .into_iter()
                            .map(AsyncCmd)
                            .collect();
                        responses.push(AsyncCmd(AsyncCmdResponse::from_response(
                            tag,
                            CmdResponse::NoData,
                        )));
                        responses
                    }
                    Err(error) => {
                        error!(
                            "failed to 'Reset' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(tag, error))]
                    }
                }
            }
            // Backend-reported error: forward it to the guest as an event.
            DecoderEvent::NotifyError(error) => {
                error!("an error is notified by VDA: {}", error);
                vec![Event(VideoEvt {
                    typ: EvtType::Error,
                    stream_id,
                })]
            }
        };

        Some(event_responses)
    }

    // Called when a polled buffer barrier descriptor of `stream_id` signals:
    // removes the barrier from the front of the pending-response queue (and
    // from `wait_ctx`) and attempts to deliver the responses behind it.
    fn process_buffer_barrier(
        &mut self,
        stream_id: u32,
        wait_ctx: &WaitContext<Token>,
    ) -> Option<Vec<VideoEvtResponseType>> {
        let ctx = match self.contexts.get_mut(&stream_id) {
            Ok(ctx) => ctx,
            Err(e) => {
                error!("failed to get a context for session {}: {}", stream_id, e);
                return None;
            }
        };

        match ctx.pending_responses.front() {
            Some(PendingResponse::PollingBufferBarrier(desc)) => {
                // `delete` can return an error if the descriptor has been closed by e.g. the GPU
                // driver. We can safely ignore these.
                let _ = wait_ctx.delete(&Descriptor(desc.as_raw_descriptor()));
                ctx.pending_responses.pop_front();
            }
            _ => {
                error!("expected a buffer barrier, but found none");
            }
        }

        Some(ctx.output_pending_responses(wait_ctx))
    }
}
1245