1 // Copyright 2018 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 mod edid;
6 mod parameters;
7 mod protocol;
8 mod virtio_gpu;
9
10 use std::cell::RefCell;
11 use std::collections::BTreeMap;
12 use std::io::Read;
13 use std::path::PathBuf;
14 use std::rc::Rc;
15 use std::sync::atomic::AtomicBool;
16 use std::sync::atomic::Ordering;
17 use std::sync::mpsc;
18 use std::sync::Arc;
19
20 use anyhow::anyhow;
21 use anyhow::Context;
22 use base::custom_serde::deserialize_map_from_kv_vec;
23 use base::custom_serde::serialize_map_as_kv_vec;
24 use base::debug;
25 use base::error;
26 use base::info;
27 #[cfg(any(target_os = "android", target_os = "linux"))]
28 use base::linux::move_task_to_cgroup;
29 use base::warn;
30 use base::AsRawDescriptor;
31 use base::Event;
32 use base::EventToken;
33 use base::RawDescriptor;
34 use base::ReadNotifier;
35 #[cfg(windows)]
36 use base::RecvTube;
37 use base::Result;
38 use base::SafeDescriptor;
39 use base::SendTube;
40 use base::Tube;
41 use base::VmEventType;
42 use base::WaitContext;
43 use base::WorkerThread;
44 use data_model::*;
45 pub use gpu_display::EventDevice;
46 use gpu_display::*;
47 use hypervisor::MemCacheType;
48 pub use parameters::AudioDeviceMode;
49 pub use parameters::GpuParameters;
50 use rutabaga_gfx::*;
51 use serde::Deserialize;
52 use serde::Serialize;
53 use sync::Mutex;
54 pub use vm_control::gpu::DisplayMode as GpuDisplayMode;
55 pub use vm_control::gpu::DisplayParameters as GpuDisplayParameters;
56 use vm_control::gpu::GpuControlCommand;
57 use vm_control::gpu::GpuControlResult;
58 pub use vm_control::gpu::MouseMode as GpuMouseMode;
59 pub use vm_control::gpu::DEFAULT_DISPLAY_HEIGHT;
60 pub use vm_control::gpu::DEFAULT_DISPLAY_WIDTH;
61 pub use vm_control::gpu::DEFAULT_REFRESH_RATE;
62 #[cfg(windows)]
63 use vm_control::ModifyWaitContext;
64 use vm_memory::GuestAddress;
65 use vm_memory::GuestMemory;
66 use zerocopy::AsBytes;
67
68 pub use self::protocol::virtio_gpu_config;
69 pub use self::protocol::VIRTIO_GPU_F_CONTEXT_INIT;
70 pub use self::protocol::VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
71 pub use self::protocol::VIRTIO_GPU_F_EDID;
72 pub use self::protocol::VIRTIO_GPU_F_FENCE_PASSING;
73 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
74 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
75 pub use self::protocol::VIRTIO_GPU_F_VIRGL;
76 pub use self::protocol::VIRTIO_GPU_MAX_SCANOUTS;
77 pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
78 use self::protocol::*;
79 use self::virtio_gpu::to_rutabaga_descriptor;
80 pub use self::virtio_gpu::ProcessDisplayResult;
81 use self::virtio_gpu::VirtioGpu;
82 use self::virtio_gpu::VirtioGpuSnapshot;
83 use super::copy_config;
84 use super::resource_bridge::ResourceRequest;
85 use super::DescriptorChain;
86 use super::DeviceType;
87 use super::Interrupt;
88 use super::Queue;
89 use super::Reader;
90 use super::SharedMemoryMapper;
91 use super::SharedMemoryPrepareType;
92 use super::SharedMemoryRegion;
93 use super::VirtioDevice;
94 use super::Writer;
95 use crate::PciAddress;
96
// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of.
const QUEUE_SIZES: &[u16] = &[512, 16];
100
/// Rendering backend selected for the virtio-gpu device.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMode {
    /// 2D-only mode with no accelerated 3D rendering.
    #[serde(rename = "2d", alias = "2D")]
    Mode2D,
    /// 3D rendering via the virglrenderer library.
    #[cfg(feature = "virgl_renderer")]
    #[serde(rename = "virglrenderer", alias = "3d", alias = "3D")]
    ModeVirglRenderer,
    /// 3D rendering via the gfxstream library.
    #[cfg(feature = "gfxstream")]
    #[serde(rename = "gfxstream")]
    ModeGfxstream,
}
112
impl Default for GpuMode {
    /// Chooses the preferred mode for the compile target: gfxstream on
    /// Windows, virglrenderer on unix, and 2D when neither feature/platform
    /// combination is enabled.
    fn default() -> Self {
        #[cfg(all(windows, feature = "gfxstream"))]
        return GpuMode::ModeGfxstream;

        #[cfg(all(unix, feature = "virgl_renderer"))]
        return GpuMode::ModeVirglRenderer;

        // Fallback arm is only compiled when neither case above is, so exactly
        // one of these `return`s exists for any cfg combination.
        #[cfg(not(any(
            all(windows, feature = "gfxstream"),
            all(unix, feature = "virgl_renderer"),
        )))]
        return GpuMode::Mode2D;
    }
}
128
/// Window system integration flavor used by the GPU device.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GpuWsi {
    /// Vulkan-based WSI.
    #[serde(alias = "vk")]
    Vulkan,
}
135
/// Layout metadata for a blob resource used as a scanout (display) surface,
/// populated from a `SET_SCANOUT_BLOB` command.
#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    pub drm_format: DrmFormat,
    // Per-plane byte strides and offsets; unused planes are left zero.
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}
144
/// Identifies a fence timeline: either the device-global ring or a
/// per-context ring selected by `(ctx_id, ring_idx)`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
enum VirtioGpuRing {
    Global,
    ContextSpecific { ctx_id: u32, ring_idx: u8 },
}
150
/// A response descriptor chain held back until its fence signals.
struct FenceDescriptor {
    // Ring the fence was created on.
    ring: VirtioGpuRing,
    // Fence id being waited on; completion of any fence id >= this releases it.
    fence_id: u64,
    // Chain to return to the guest once the fence completes.
    desc_chain: DescriptorChain,
    // Number of response bytes already written into the chain.
    len: u32,
}
157
/// Fence bookkeeping shared between the frontend and the fence handler.
#[derive(Default)]
pub struct FenceState {
    // Descriptor chains whose responses are deferred until a fence completes.
    descs: Vec<FenceDescriptor>,
    // Highest completed fence id observed per ring.
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
163
/// Serializable subset of `FenceState` captured for snapshot/restore.
/// Only the completed-fence map is saved; pending descriptors must be drained
/// before snapshotting (see `FenceState::snapshot`).
#[derive(Serialize, Deserialize)]
struct FenceStateSnapshot {
    // Customize serialization to avoid errors when trying to use objects as keys in JSON
    // dictionaries.
    #[serde(
        serialize_with = "serialize_map_as_kv_vec",
        deserialize_with = "deserialize_map_from_kv_vec"
    )]
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
174
175 impl FenceState {
snapshot(&self) -> FenceStateSnapshot176 fn snapshot(&self) -> FenceStateSnapshot {
177 assert!(self.descs.is_empty(), "can't snapshot with pending fences");
178 FenceStateSnapshot {
179 completed_fences: self.completed_fences.clone(),
180 }
181 }
182
restore(&mut self, snapshot: FenceStateSnapshot)183 fn restore(&mut self, snapshot: FenceStateSnapshot) {
184 assert!(self.descs.is_empty(), "can't restore activated device");
185 self.completed_fences = snapshot.completed_fences;
186 }
187 }
188
/// Abstraction over a virtio queue so single-threaded (`RefCell`) and shared
/// (`Arc<Mutex<_>>`) queue wrappers can be used interchangeably.
pub trait QueueReader {
    /// Pops the next available descriptor chain, if any.
    fn pop(&self) -> Option<DescriptorChain>;
    /// Returns a finished chain to the used ring with `len` bytes written.
    fn add_used(&self, desc_chain: DescriptorChain, len: u32);
    /// Triggers the queue interrupt to notify the guest of used descriptors.
    fn signal_used(&self);
}
194
/// `QueueReader` for use from a single thread; interior mutability via `RefCell`.
struct LocalQueueReader {
    queue: RefCell<Queue>,
}
198
199 impl LocalQueueReader {
new(queue: Queue) -> Self200 fn new(queue: Queue) -> Self {
201 Self {
202 queue: RefCell::new(queue),
203 }
204 }
205 }
206
207 impl QueueReader for LocalQueueReader {
pop(&self) -> Option<DescriptorChain>208 fn pop(&self) -> Option<DescriptorChain> {
209 self.queue.borrow_mut().pop()
210 }
211
add_used(&self, desc_chain: DescriptorChain, len: u32)212 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
213 self.queue.borrow_mut().add_used(desc_chain, len)
214 }
215
signal_used(&self)216 fn signal_used(&self) {
217 self.queue.borrow_mut().trigger_interrupt();
218 }
219 }
220
/// Cloneable `QueueReader` that can be shared across threads; the queue lives
/// behind an `Arc<Mutex<_>>`.
#[derive(Clone)]
struct SharedQueueReader {
    queue: Arc<Mutex<Queue>>,
}
225
226 impl SharedQueueReader {
new(queue: Queue) -> Self227 fn new(queue: Queue) -> Self {
228 Self {
229 queue: Arc::new(Mutex::new(queue)),
230 }
231 }
232 }
233
234 impl QueueReader for SharedQueueReader {
pop(&self) -> Option<DescriptorChain>235 fn pop(&self) -> Option<DescriptorChain> {
236 self.queue.lock().pop()
237 }
238
add_used(&self, desc_chain: DescriptorChain, len: u32)239 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
240 self.queue.lock().add_used(desc_chain, len)
241 }
242
signal_used(&self)243 fn signal_used(&self) {
244 self.queue.lock().trigger_interrupt();
245 }
246 }
247
248 /// Initializes the virtio_gpu state tracker.
build( display_backends: &[DisplayBackend], display_params: Vec<GpuDisplayParameters>, display_event: Arc<AtomicBool>, rutabaga: Rutabaga, mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>, external_blob: bool, fixed_blob_mapping: bool, #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>, udmabuf: bool, #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube, ) -> Option<VirtioGpu>249 fn build(
250 display_backends: &[DisplayBackend],
251 display_params: Vec<GpuDisplayParameters>,
252 display_event: Arc<AtomicBool>,
253 rutabaga: Rutabaga,
254 mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
255 external_blob: bool,
256 fixed_blob_mapping: bool,
257 #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
258 udmabuf: bool,
259 #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
260 ) -> Option<VirtioGpu> {
261 let mut display_opt = None;
262 for display_backend in display_backends {
263 match display_backend.build(
264 #[cfg(windows)]
265 wndproc_thread,
266 #[cfg(windows)]
267 gpu_display_wait_descriptor_ctrl_wr
268 .try_clone()
269 .expect("failed to clone wait context ctrl channel"),
270 ) {
271 Ok(c) => {
272 display_opt = Some(c);
273 break;
274 }
275 Err(e) => error!("failed to open display: {}", e),
276 };
277 }
278
279 let display = match display_opt {
280 Some(d) => d,
281 None => {
282 error!("failed to open any displays");
283 return None;
284 }
285 };
286
287 VirtioGpu::new(
288 display,
289 display_params,
290 display_event,
291 rutabaga,
292 mapper,
293 external_blob,
294 fixed_blob_mapping,
295 udmabuf,
296 )
297 }
298
/// Resources used by the fence handler.
pub struct FenceHandlerActivationResources<Q>
where
    Q: QueueReader + Send + Clone + 'static,
{
    // NOTE(review): `mem` is not read inside `create_fence_handler` itself —
    // presumably retained so guest memory stays available while the device is
    // active; confirm before removing.
    pub mem: GuestMemory,
    // Control queue used to return fence-gated descriptor chains.
    pub ctrl_queue: Q,
}
307
/// Create a handler that writes into the completed fence queue.
///
/// The returned handler runs on rutabaga's fence-completion path: it releases
/// every pending descriptor chain whose fence id is covered by the completed
/// fence on the same ring, records the new high-water mark in
/// `fence_state.completed_fences`, and signals the control queue if anything
/// was returned. It is a no-op until `fence_handler_resources` is populated
/// at device activation.
pub fn create_fence_handler<Q>(
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<Q>>>>,
    fence_state: Arc<Mutex<FenceState>>,
) -> RutabagaFenceHandler
where
    Q: QueueReader + Send + Clone + 'static,
{
    RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
        let mut signal = false;

        if let Some(ref fence_handler_resources) = *fence_handler_resources.lock() {
            // Limits the lifetime of `fence_state`:
            {
                // Map the fence's flags to the ring it was created on.
                let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific {
                        ctx_id: completed_fence.ctx_id,
                        ring_idx: completed_fence.ring_idx,
                    },
                };

                let mut fence_state = fence_state.lock();
                // TODO(dverkamp): use `drain_filter()` when it is stabilized
                let mut i = 0;
                while i < fence_state.descs.len() {
                    // A completed fence also completes every earlier fence id
                    // on the same ring.
                    if fence_state.descs[i].ring == ring
                        && fence_state.descs[i].fence_id <= completed_fence.fence_id
                    {
                        let completed_desc = fence_state.descs.remove(i);
                        fence_handler_resources
                            .ctrl_queue
                            .add_used(completed_desc.desc_chain, completed_desc.len);
                        signal = true;
                    } else {
                        i += 1;
                    }
                }

                // Update the last completed fence for this context
                fence_state
                    .completed_fences
                    .insert(ring, completed_fence.fence_id);
            }

            if signal {
                fence_handler_resources.ctrl_queue.signal_used();
            }
        }
    })
}
359
/// A completed descriptor chain ready to be added to the used ring.
pub struct ReturnDescriptor {
    pub desc_chain: DescriptorChain,
    // Number of response bytes written into the chain.
    pub len: u32,
}
364
/// Decodes guest virtio-gpu commands and drives the `VirtioGpu` state tracker.
pub struct Frontend {
    // Fence bookkeeping shared with the rutabaga fence handler.
    fence_state: Arc<Mutex<FenceState>>,
    virtio_gpu: VirtioGpu,
}
369
370 impl Frontend {
new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend371 fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
372 Frontend {
373 fence_state,
374 virtio_gpu,
375 }
376 }
377
    /// Returns the internal connection to the compositor and its associated state.
    pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
        self.virtio_gpu.display()
    }
382
    /// Processes the internal `display` events and returns `true` if any display was closed.
    /// (See `ProcessDisplayResult` for the full set of outcomes.)
    pub fn process_display(&mut self) -> ProcessDisplayResult {
        self.virtio_gpu.process_display()
    }
387
388 /// Processes incoming requests on `resource_bridge`.
process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()>389 pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
390 let response = match resource_bridge.recv() {
391 Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
392 Ok(ResourceRequest::GetFence { seqno }) => self.virtio_gpu.export_fence(seqno),
393 Err(e) => return Err(e).context("Error receiving resource bridge request"),
394 };
395
396 resource_bridge
397 .send(&response)
398 .context("Error sending resource bridge response")?;
399
400 Ok(())
401 }
402
    /// Processes the GPU control command and returns the result.
    /// (Note: an earlier version of this comment mentioned a returned bool for
    /// config updates; the current return type is only `GpuControlResult`.)
    pub fn process_gpu_control_command(&mut self, cmd: GpuControlCommand) -> GpuControlResult {
        self.virtio_gpu.process_gpu_control_command(cmd)
    }
408
    /// Decodes `cmd` into a call on the `VirtioGpu` state tracker.
    ///
    /// `reader` supplies any variable-length payload that follows the fixed
    /// command header (memory entries, fence ids, command buffers). Returns
    /// the `GpuResponse` to encode back to the guest on success, or an error
    /// response variant.
    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        reader: &mut Reader,
    ) -> VirtioGpuResult {
        self.virtio_gpu.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
                self.virtio_gpu.display_info().to_vec(),
            )),
            GpuCommand::ResourceCreate2d(info) => {
                let resource_id = info.resource_id.to_native();

                // 2D resources are created through the 3D path as a
                // render-target texture with depth/array_size fixed to 1.
                let resource_create_3d = ResourceCreate3D {
                    target: RUTABAGA_PIPE_TEXTURE_2D,
                    format: info.format.to_native(),
                    bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: 1,
                    array_size: 1,
                    last_level: 0,
                    nr_samples: 0,
                    flags: 0,
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::ResourceUnref(info) => {
                self.virtio_gpu.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
                info.scanout_id.to_native(),
                info.resource_id.to_native(),
                None,
            ),
            GpuCommand::ResourceFlush(info) => {
                self.virtio_gpu.flush_resource(info.resource_id.to_native())
            }
            GpuCommand::TransferToHost2d(info) => {
                let resource_id = info.resource_id.to_native();
                let transfer = Transfer3D::new_2d(
                    info.r.x.to_native(),
                    info.r.y.to_native(),
                    info.r.width.to_native(),
                    info.r.height.to_native(),
                    info.offset.to_native(),
                );
                self.virtio_gpu.transfer_write(0, resource_id, transfer)
            }
            GpuCommand::ResourceAttachBacking(info) => {
                // The guest-physical backing pages follow the header as an
                // array of `virtio_gpu_mem_entry` structs.
                let available_bytes = reader.available_bytes();
                if available_bytes != 0 {
                    let entry_count = info.nr_entries.to_native() as usize;
                    let mut vecs = Vec::with_capacity(entry_count);
                    for _ in 0..entry_count {
                        match reader.read_obj::<virtio_gpu_mem_entry>() {
                            Ok(entry) => {
                                let addr = GuestAddress(entry.addr.to_native());
                                let len = entry.length.to_native() as usize;
                                vecs.push((addr, len))
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }
                    self.virtio_gpu
                        .attach_backing(info.resource_id.to_native(), mem, vecs)
                } else {
                    error!("missing data for command {:?}", cmd);
                    Err(GpuResponse::ErrUnspec)
                }
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.virtio_gpu.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
                info.resource_id.to_native(),
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::ResourceAssignUuid(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_assign_uuid(resource_id)
            }
            GpuCommand::GetCapsetInfo(info) => self
                .virtio_gpu
                .get_capset_info(info.capset_index.to_native()),
            GpuCommand::GetCapset(info) => self
                .virtio_gpu
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => {
                // Best-effort: a non-UTF-8 debug name is dropped rather than
                // failing the command.
                let context_name: Option<String> = String::from_utf8(info.debug_name.to_vec()).ok();
                self.virtio_gpu.create_context(
                    info.hdr.ctx_id.to_native(),
                    info.context_init.to_native(),
                    context_name.as_deref(),
                )
            }
            GpuCommand::CtxDestroy(info) => {
                self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
            }
            GpuCommand::CtxAttachResource(info) => self
                .virtio_gpu
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .virtio_gpu
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                let resource_id = info.resource_id.to_native();
                let resource_create_3d = ResourceCreate3D {
                    target: info.target.to_native(),
                    format: info.format.to_native(),
                    bind: info.bind.to_native(),
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: info.depth.to_native(),
                    array_size: info.array_size.to_native(),
                    last_level: info.last_level.to_native(),
                    nr_samples: info.nr_samples.to_native(),
                    flags: info.flags.to_native(),
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_write(ctx_id, resource_id, transfer)
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_read(ctx_id, resource_id, transfer, None)
            }
            GpuCommand::CmdSubmit3d(info) => {
                if reader.available_bytes() != 0 {
                    // Payload layout: `num_in_fences` Le64 fence ids, then
                    // `size` bytes of command stream.
                    let num_in_fences = info.num_in_fences.to_native() as usize;
                    let cmd_size = info.size.to_native() as usize;
                    let mut cmd_buf = vec![0; cmd_size];
                    let mut fence_ids: Vec<u64> = Vec::with_capacity(num_in_fences);
                    let ctx_id = info.hdr.ctx_id.to_native();

                    for _ in 0..num_in_fences {
                        match reader.read_obj::<Le64>() {
                            Ok(fence_id) => {
                                fence_ids.push(fence_id.to_native());
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }

                    if reader.read_exact(&mut cmd_buf[..]).is_ok() {
                        self.virtio_gpu
                            .submit_command(ctx_id, &mut cmd_buf[..], &fence_ids[..])
                    } else {
                        Err(GpuResponse::ErrInvalidParameter)
                    }
                } else {
                    // Silently accept empty command buffers to allow for
                    // benchmarking.
                    Ok(GpuResponse::OkNoData)
                }
            }
            GpuCommand::ResourceCreateBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let ctx_id = info.hdr.ctx_id.to_native();

                let resource_create_blob = ResourceCreateBlob {
                    blob_mem: info.blob_mem.to_native(),
                    blob_flags: info.blob_flags.to_native(),
                    blob_id: info.blob_id.to_native(),
                    size: info.size.to_native(),
                };

                // Entries are required when nr_entries > 0; a missing payload
                // is a malformed command.
                let entry_count = info.nr_entries.to_native();
                if reader.available_bytes() == 0 && entry_count > 0 {
                    return Err(GpuResponse::ErrUnspec);
                }

                let mut vecs = Vec::with_capacity(entry_count as usize);
                for _ in 0..entry_count {
                    match reader.read_obj::<virtio_gpu_mem_entry>() {
                        Ok(entry) => {
                            let addr = GuestAddress(entry.addr.to_native());
                            let len = entry.length.to_native() as usize;
                            vecs.push((addr, len))
                        }
                        Err(_) => return Err(GpuResponse::ErrUnspec),
                    }
                }

                self.virtio_gpu.resource_create_blob(
                    ctx_id,
                    resource_id,
                    resource_create_blob,
                    vecs,
                    mem,
                )
            }
            GpuCommand::SetScanoutBlob(info) => {
                let scanout_id = info.scanout_id.to_native();
                let resource_id = info.resource_id.to_native();
                let virtio_gpu_format = info.format.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let mut strides: [u32; 4] = [0; 4];
                let mut offsets: [u32; 4] = [0; 4];

                // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
                // the following may have to change too.
                let drm_format = match virtio_gpu_format {
                    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
                    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
                    _ => {
                        error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
                        return Err(GpuResponse::ErrUnspec);
                    }
                };

                for plane_index in 0..PLANE_INFO_MAX_COUNT {
                    offsets[plane_index] = info.offsets[plane_index].to_native();
                    strides[plane_index] = info.strides[plane_index].to_native();
                }

                let scanout = VirtioScanoutBlobData {
                    width,
                    height,
                    drm_format,
                    strides,
                    offsets,
                };

                self.virtio_gpu
                    .set_scanout(scanout_id, resource_id, Some(scanout))
            }
            GpuCommand::ResourceMapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let offset = info.offset.to_native();
                self.virtio_gpu.resource_map_blob(resource_id, offset)
            }
            GpuCommand::ResourceUnmapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_unmap_blob(resource_id)
            }
            GpuCommand::GetEdid(info) => self.virtio_gpu.get_edid(info.scanout.to_native()),
        }
    }
696
697 /// Processes virtio messages on `queue`.
process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool698 pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
699 let mut signal_used = false;
700 while let Some(desc) = queue.pop() {
701 if let Some(ret_desc) = self.process_descriptor(mem, desc) {
702 queue.add_used(ret_desc.desc_chain, ret_desc.len);
703 signal_used = true;
704 }
705 }
706
707 signal_used
708 }
709
    /// Decodes and executes a single descriptor chain from a queue.
    ///
    /// Returns the chain to put on the used ring immediately, or `None` when
    /// the response is fence-gated: the chain is parked in
    /// `fence_state.descs` and later returned by the fence handler.
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        mut desc_chain: DescriptorChain,
    ) -> Option<ReturnDescriptor> {
        let reader = &mut desc_chain.reader;
        let writer = &mut desc_chain.writer;
        let mut resp = Err(GpuResponse::ErrUnspec);
        let mut gpu_cmd = None;
        let mut len = 0;
        match GpuCommand::decode(reader) {
            Ok(cmd) => {
                resp = self.process_gpu_command(mem, cmd, reader);
                gpu_cmd = Some(cmd);
            }
            // Undecodable commands still get an ErrUnspec response below.
            Err(e) => debug!("descriptor decode error: {}", e),
        }

        let mut gpu_response = match resp {
            Ok(gpu_response) => gpu_response,
            Err(gpu_response) => {
                if let Some(gpu_cmd) = gpu_cmd {
                    error!(
                        "error processing gpu command {:?}: {:?}",
                        gpu_cmd, gpu_response
                    );
                }
                gpu_response
            }
        };

        if writer.available_bytes() != 0 {
            let mut fence_id = 0;
            let mut ctx_id = 0;
            let mut flags = 0;
            let mut ring_idx = 0;
            if let Some(cmd) = gpu_cmd {
                // If the guest requested a fence, create it now so its id can
                // be reflected in the response header.
                let ctrl_hdr = cmd.ctrl_hdr();
                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                    flags = ctrl_hdr.flags.to_native();
                    fence_id = ctrl_hdr.fence_id.to_native();
                    ctx_id = ctrl_hdr.ctx_id.to_native();
                    ring_idx = ctrl_hdr.ring_idx;

                    let fence = RutabagaFence {
                        flags,
                        fence_id,
                        ctx_id,
                        ring_idx,
                    };
                    gpu_response = match self.virtio_gpu.create_fence(fence) {
                        Ok(_) => gpu_response,
                        Err(fence_resp) => {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            fence_resp
                        }
                    };
                }
            }

            // Prepare the response now, even if it is going to wait until
            // fence is complete.
            match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
                Ok(l) => len = l,
                Err(e) => debug!("ctrl queue response encode error: {}", e),
            }

            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
                };

                // In case the fence is signaled immediately after creation, don't add a return
                // FenceDescriptor.
                let mut fence_state = self.fence_state.lock();
                if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
                    fence_state.descs.push(FenceDescriptor {
                        ring,
                        fence_id,
                        desc_chain,
                        len,
                    });

                    return None;
                }
            }

            // No fence (or already completed fence), respond now.
        }
        Some(ReturnDescriptor { desc_chain, len })
    }
802
    /// Forwards to `VirtioGpu::event_poll` to service renderer events.
    pub fn event_poll(&self) {
        self.virtio_gpu.event_poll();
    }
806 }
807
/// Wait-context tokens for the GPU worker's event loop.
#[derive(EventToken, PartialEq, Eq, Clone, Copy, Debug)]
enum WorkerToken {
    CtrlQueue,
    CursorQueue,
    Display,
    GpuControl,
    InterruptResample,
    Sleep,
    Kill,
    // One token per registered resource bridge tube; `index` selects it.
    ResourceBridge {
        index: usize,
    },
    VirtioGpuPoll,
    #[cfg(windows)]
    DisplayDescriptorRequest,
}
824
/// Wrapper around a `WaitContext` that remembers each registered
/// (descriptor, token) pair so entries can later be removed by token.
struct EventManager<'a> {
    pub wait_ctx: WaitContext<WorkerToken>,
    // Registered descriptors, mirrored from `wait_ctx`.
    events: Vec<(&'a dyn AsRawDescriptor, WorkerToken)>,
}
829
830 impl<'a> EventManager<'a> {
new() -> Result<EventManager<'a>>831 pub fn new() -> Result<EventManager<'a>> {
832 Ok(EventManager {
833 wait_ctx: WaitContext::new()?,
834 events: vec![],
835 })
836 }
837
build_with( triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)], ) -> Result<EventManager<'a>>838 pub fn build_with(
839 triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)],
840 ) -> Result<EventManager<'a>> {
841 let mut manager = EventManager::new()?;
842 manager.wait_ctx.add_many(triggers)?;
843
844 for (descriptor, token) in triggers {
845 manager.events.push((*descriptor, *token));
846 }
847 Ok(manager)
848 }
849
add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()>850 pub fn add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()> {
851 self.wait_ctx.add(descriptor, token)?;
852 self.events.push((descriptor, token));
853 Ok(())
854 }
855
delete(&mut self, token: WorkerToken)856 pub fn delete(&mut self, token: WorkerToken) {
857 self.events.retain(|event| {
858 if event.1 == token {
859 self.wait_ctx.delete(event.0).ok();
860 return false;
861 }
862 true
863 });
864 }
865 }
866
/// Serialized worker state produced for `WorkerRequest::Snapshot` and
/// consumed by `WorkerRequest::Restore`.
#[derive(Serialize, Deserialize)]
struct WorkerSnapshot {
    fence_state_snapshot: FenceStateSnapshot,
    virtio_gpu_snapshot: VirtioGpuSnapshot,
}
872
/// Payload of `WorkerRequest::Activate`: everything the worker needs to start
/// servicing queues.
struct WorkerActivateRequest {
    resources: GpuActivationResources,
}
876
/// Control messages sent from the device to the worker thread.
enum WorkerRequest {
    Activate(WorkerActivateRequest),
    Suspend,
    Snapshot,
    Restore(WorkerSnapshot),
}
883
/// Replies sent from the worker thread back to the device, one per
/// `WorkerRequest`.
enum WorkerResponse {
    Ok,
    // Queues handed back on suspend so the device can reuse them.
    Suspend(GpuDeactivationResources),
    Snapshot(WorkerSnapshot),
}
889
/// Runtime resources handed to the worker when the device is activated.
struct GpuActivationResources {
    mem: GuestMemory,
    interrupt: Interrupt,
    ctrl_queue: SharedQueueReader,
    cursor_queue: LocalQueueReader,
}
896
/// Resources returned from the worker when the device is suspended.
struct GpuDeactivationResources {
    // Queues reclaimed from the worker, if any were active.
    queues: Option<Vec<Queue>>,
}
900
/// The GPU worker: owns the `Frontend` and services control/cursor queues,
/// display events, and control-tube requests on its own thread.
struct Worker {
    // Channel pair for requests from the device and their responses.
    request_receiver: mpsc::Receiver<WorkerRequest>,
    response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
    // Tube used to signal a VM exit event.
    exit_evt_wrtube: SendTube,
    gpu_control_tube: Tube,
    resource_bridges: ResourceBridges,
    // Signaled to put the worker to sleep / to kill it.
    suspend_evt: Event,
    kill_evt: Event,
    state: Frontend,
    // Shared with the rutabaga fence handler created in `Worker::new`.
    fence_state: Arc<Mutex<FenceState>>,
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<SharedQueueReader>>>>,
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_rd: RecvTube,
    // Populated on activate, taken back on suspend.
    activation_resources: Option<GpuActivationResources>,
}
916
/// Why `run_until_sleep_or_exit()` returned control to `run()`.
#[derive(Copy, Clone)]
enum WorkerStopReason {
    // Worker should go back to waiting for requests (suspend).
    Sleep,
    // Worker should exit entirely.
    Kill,
}
922
/// Coarse lifecycle state of the worker as tracked by the device side.
enum WorkerState {
    Inactive,
    Active,
    Error,
}
928
929 impl Worker {
    /// Builds a `Worker` and its `VirtioGpu` state tracker.
    ///
    /// Constructs the rutabaga instance with a fence handler wired to the
    /// (initially empty) activation resources, opens a display via `build()`,
    /// and imports any `event_devices`. Fails if rutabaga or the virtio-gpu
    /// state cannot be created, or if importing an event device fails.
    fn new(
        rutabaga_builder: RutabagaBuilder,
        rutabaga_server_descriptor: Option<RutabagaDescriptor>,
        display_backends: Vec<DisplayBackend>,
        display_params: Vec<GpuDisplayParameters>,
        display_event: Arc<AtomicBool>,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
        event_devices: Vec<EventDevice>,
        external_blob: bool,
        fixed_blob_mapping: bool,
        udmabuf: bool,
        request_receiver: mpsc::Receiver<WorkerRequest>,
        response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: ResourceBridges,
        suspend_evt: Event,
        kill_evt: Event,
        #[cfg(windows)] mut wndproc_thread: Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_rd: RecvTube,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
    ) -> anyhow::Result<Worker> {
        // Fence handler resources stay `None` until activation; the handler is
        // a no-op until then.
        let fence_state = Arc::new(Mutex::new(Default::default()));
        let fence_handler_resources = Arc::new(Mutex::new(None));
        let fence_handler =
            create_fence_handler(fence_handler_resources.clone(), fence_state.clone());
        let rutabaga = rutabaga_builder.build(fence_handler, rutabaga_server_descriptor)?;
        let mut virtio_gpu = build(
            &display_backends,
            display_params,
            display_event,
            rutabaga,
            mapper,
            external_blob,
            fixed_blob_mapping,
            #[cfg(windows)]
            &mut wndproc_thread,
            udmabuf,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
        )
        .ok_or_else(|| anyhow!("failed to build virtio gpu"))?;

        for event_device in event_devices {
            virtio_gpu
                .import_event_device(event_device)
                // We lost the `EventDevice`, so fail hard.
                .context("failed to import event device")?;
        }

        Ok(Worker {
            request_receiver,
            response_sender,
            exit_evt_wrtube,
            gpu_control_tube,
            resource_bridges,
            suspend_evt,
            kill_evt,
            state: Frontend::new(virtio_gpu, fence_state.clone()),
            fence_state,
            fence_handler_resources,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd,
            activation_resources: None,
        })
    }
996
run(&mut self)997 fn run(&mut self) {
998 // This loop effectively only runs while the worker is inactive. Once activated via
999 // a `WorkerRequest::Activate`, the worker will remain in `run_until_sleep_or_exit()`
1000 // until suspended via `kill_evt` or `suspend_evt` being signaled.
1001 loop {
1002 let request = match self.request_receiver.recv() {
1003 Ok(r) => r,
1004 Err(_) => {
1005 info!("virtio gpu worker connection ended, exiting.");
1006 return;
1007 }
1008 };
1009
1010 match request {
1011 WorkerRequest::Activate(request) => {
1012 let response = self.on_activate(request).map(|_| WorkerResponse::Ok);
1013 self.response_sender
1014 .send(response)
1015 .expect("failed to send gpu worker response for activate");
1016
1017 let stop_reason = self
1018 .run_until_sleep_or_exit()
1019 .expect("failed to run gpu worker processing");
1020
1021 if let WorkerStopReason::Kill = stop_reason {
1022 break;
1023 }
1024 }
1025 WorkerRequest::Suspend => {
1026 let response = self.on_suspend().map(WorkerResponse::Suspend);
1027 self.response_sender
1028 .send(response)
1029 .expect("failed to send gpu worker response for suspend");
1030 }
1031 WorkerRequest::Snapshot => {
1032 let response = self.on_snapshot().map(WorkerResponse::Snapshot);
1033 self.response_sender
1034 .send(response)
1035 .expect("failed to send gpu worker response for snapshot");
1036 }
1037 WorkerRequest::Restore(snapshot) => {
1038 let response = self.on_restore(snapshot).map(|_| WorkerResponse::Ok);
1039 self.response_sender
1040 .send(response)
1041 .expect("failed to send gpu worker response for restore");
1042 }
1043 }
1044 }
1045 }
1046
on_activate(&mut self, request: WorkerActivateRequest) -> anyhow::Result<()>1047 fn on_activate(&mut self, request: WorkerActivateRequest) -> anyhow::Result<()> {
1048 self.fence_handler_resources
1049 .lock()
1050 .replace(FenceHandlerActivationResources {
1051 mem: request.resources.mem.clone(),
1052 ctrl_queue: request.resources.ctrl_queue.clone(),
1053 });
1054
1055 self.state
1056 .virtio_gpu
1057 .resume(&request.resources.mem)
1058 .context("gpu worker failed to activate virtio frontend")?;
1059
1060 self.activation_resources = Some(request.resources);
1061
1062 Ok(())
1063 }
1064
on_suspend(&mut self) -> anyhow::Result<GpuDeactivationResources>1065 fn on_suspend(&mut self) -> anyhow::Result<GpuDeactivationResources> {
1066 self.state
1067 .virtio_gpu
1068 .suspend()
1069 .context("failed to suspend VirtioGpu")?;
1070
1071 self.fence_handler_resources.lock().take();
1072
1073 let queues = if let Some(activation_resources) = self.activation_resources.take() {
1074 Some(vec![
1075 match Arc::try_unwrap(activation_resources.ctrl_queue.queue) {
1076 Ok(x) => x.into_inner(),
1077 Err(_) => panic!("too many refs on ctrl_queue"),
1078 },
1079 activation_resources.cursor_queue.queue.into_inner(),
1080 ])
1081 } else {
1082 None
1083 };
1084
1085 Ok(GpuDeactivationResources { queues })
1086 }
1087
on_snapshot(&mut self) -> anyhow::Result<WorkerSnapshot>1088 fn on_snapshot(&mut self) -> anyhow::Result<WorkerSnapshot> {
1089 Ok(WorkerSnapshot {
1090 fence_state_snapshot: self.fence_state.lock().snapshot(),
1091 virtio_gpu_snapshot: self
1092 .state
1093 .virtio_gpu
1094 .snapshot()
1095 .context("failed to snapshot VirtioGpu")?,
1096 })
1097 }
1098
on_restore(&mut self, snapshot: WorkerSnapshot) -> anyhow::Result<()>1099 fn on_restore(&mut self, snapshot: WorkerSnapshot) -> anyhow::Result<()> {
1100 self.fence_state
1101 .lock()
1102 .restore(snapshot.fence_state_snapshot);
1103
1104 self.state
1105 .virtio_gpu
1106 .restore(snapshot.virtio_gpu_snapshot)
1107 .context("failed to restore VirtioGpu")?;
1108
1109 Ok(())
1110 }
1111
    /// Main event loop of the active worker.
    ///
    /// Waits on queue events, display events, GPU control requests, and
    /// resource bridges, and only returns once either `suspend_evt`
    /// (`WorkerStopReason::Sleep`) or `kill_evt` (`WorkerStopReason::Kill`)
    /// is signaled. Requires that `on_activate` has populated
    /// `activation_resources` first.
    fn run_until_sleep_or_exit(&mut self) -> anyhow::Result<WorkerStopReason> {
        let activation_resources = self
            .activation_resources
            .as_ref()
            .context("virtio gpu worker missing activation resources")?;

        // Descriptor used to wait for display events.
        let display_desc =
            SafeDescriptor::try_from(&*self.state.display().borrow() as &dyn AsRawDescriptor)
                .context("failed getting event descriptor for display")?;

        let ctrl_evt = activation_resources
            .ctrl_queue
            .queue
            .lock()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;
        let cursor_evt = activation_resources
            .cursor_queue
            .queue
            .borrow()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;

        let mut event_manager = EventManager::build_with(&[
            (&ctrl_evt, WorkerToken::CtrlQueue),
            (&cursor_evt, WorkerToken::CursorQueue),
            (&display_desc, WorkerToken::Display),
            (
                self.gpu_control_tube.get_read_notifier(),
                WorkerToken::GpuControl,
            ),
            (&self.suspend_evt, WorkerToken::Sleep),
            (&self.kill_evt, WorkerToken::Kill),
            #[cfg(windows)]
            (
                self.gpu_display_wait_descriptor_ctrl_rd.get_read_notifier(),
                WorkerToken::DisplayDescriptorRequest,
            ),
        ])
        .context("failed creating gpu worker WaitContext")?;

        if let Some(resample_evt) = activation_resources.interrupt.get_resample_evt() {
            event_manager
                .add(resample_evt, WorkerToken::InterruptResample)
                .context("failed adding interrupt resample event to WaitContext")?;
        }

        // Declared outside the `if` so the descriptor outlives its registration
        // in the wait context below.
        let poll_desc: SafeDescriptor;
        if let Some(desc) = self.state.virtio_gpu.poll_descriptor() {
            poll_desc = desc;
            event_manager
                .add(&poll_desc, WorkerToken::VirtioGpuPoll)
                .context("failed adding poll event to WaitContext")?;
        }

        self.resource_bridges
            .add_to_wait_context(&mut event_manager.wait_ctx);

        // TODO(davidriley): The entire main loop processing is somewhat racey and incorrect with
        // respect to cursor vs control queue processing. As both currently and originally
        // written, while the control queue is only processed/read from after the cursor queue
        // is finished, the entire queue will be processed at that time. The end effect of this
        // raciness is that control queue descriptors that are issued after cursor descriptors
        // might be handled first instead of the other way around. In practice, the cursor queue
        // isn't used so this isn't a huge issue.

        loop {
            let events = event_manager
                .wait_ctx
                .wait()
                .context("failed polling for gpu worker events")?;

            let mut signal_used_cursor = false;
            let mut signal_used_ctrl = false;
            let mut ctrl_available = false;
            let mut display_available = false;
            let mut needs_config_interrupt = false;

            // Remove event triggers that have been hung-up to prevent unnecessary worker wake-ups
            // (see b/244486346#comment62 for context).
            for event in events.iter().filter(|e| e.is_hungup) {
                error!(
                    "unhandled virtio-gpu worker event hang-up detected: {:?}",
                    event.token
                );
                event_manager.delete(event.token);
            }

            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    WorkerToken::CtrlQueue => {
                        let _ = ctrl_evt.wait();
                        // Set flag that control queue is available to be read, but defer reading
                        // until rest of the events are processed.
                        ctrl_available = true;
                    }
                    WorkerToken::CursorQueue => {
                        let _ = cursor_evt.wait();
                        if self.state.process_queue(
                            &activation_resources.mem,
                            &activation_resources.cursor_queue,
                        ) {
                            signal_used_cursor = true;
                        }
                    }
                    WorkerToken::Display => {
                        // We only need to process_display once-per-wake, regardless of how many
                        // WorkerToken::Display events are received.
                        display_available = true;
                    }
                    #[cfg(windows)]
                    WorkerToken::DisplayDescriptorRequest => {
                        // The Windows display backend cannot expose a single pollable
                        // descriptor, so it sends additional descriptors to watch here.
                        if let Ok(req) = self
                            .gpu_display_wait_descriptor_ctrl_rd
                            .recv::<ModifyWaitContext>()
                        {
                            match req {
                                ModifyWaitContext::Add(desc) => {
                                    if let Err(e) =
                                        event_manager.wait_ctx.add(&desc, WorkerToken::Display)
                                    {
                                        error!(
                                            "failed to add extra descriptor from display \
                                            to GPU worker wait context: {:?}",
                                            e
                                        )
                                    }
                                }
                            }
                        } else {
                            error!("failed to receive ModifyWaitContext request.")
                        }
                    }
                    WorkerToken::GpuControl => {
                        let req = self
                            .gpu_control_tube
                            .recv()
                            .context("failed to recv from gpu control socket")?;
                        let resp = self.state.process_gpu_control_command(req);

                        if let GpuControlResult::DisplaysUpdated = resp {
                            needs_config_interrupt = true;
                        }

                        self.gpu_control_tube
                            .send(&resp)
                            .context("failed to send gpu control socket response")?;
                    }
                    WorkerToken::ResourceBridge { index } => {
                        // Deferred: processed after the control queue, see comment below.
                        self.resource_bridges.set_should_process(index);
                    }
                    WorkerToken::InterruptResample => {
                        activation_resources.interrupt.interrupt_resample();
                    }
                    WorkerToken::VirtioGpuPoll => {
                        self.state.event_poll();
                    }
                    WorkerToken::Sleep => {
                        return Ok(WorkerStopReason::Sleep);
                    }
                    WorkerToken::Kill => {
                        return Ok(WorkerStopReason::Kill);
                    }
                }
            }

            if display_available {
                match self.state.process_display() {
                    ProcessDisplayResult::CloseRequested => {
                        let _ = self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit);
                    }
                    ProcessDisplayResult::Error(_e) => {
                        base::error!("Display processing failed, disabling display event handler.");
                        event_manager.delete(WorkerToken::Display);
                    }
                    ProcessDisplayResult::Success => (),
                };
            }

            if ctrl_available
                && self
                    .state
                    .process_queue(&activation_resources.mem, &activation_resources.ctrl_queue)
            {
                signal_used_ctrl = true;
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            // TODO(davidriley): This is still inherently racey if both the control queue request
            // and the resource bridge request come in at the same time after the control queue is
            // processed above and before the corresponding bridge is processed below.
            self.resource_bridges
                .process_resource_bridges(&mut self.state, &mut event_manager.wait_ctx);

            if signal_used_ctrl {
                activation_resources.ctrl_queue.signal_used();
            }

            if signal_used_cursor {
                activation_resources.cursor_queue.signal_used();
            }

            if needs_config_interrupt {
                activation_resources.interrupt.signal_config_changed();
            }
        }
    }
}
1324
/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work. See [`DisplayBackend::build`] for how a variant
/// is turned into a live `GpuDisplay` connection.
#[derive(Clone)]
pub enum DisplayBackend {
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Use the wayland backend with the given socket path if given.
    Wayland(Option<PathBuf>),
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Open a connection to the X server at the given display if given.
    X(Option<String>),
    /// Emulate a display without actually displaying it.
    Stub,
    #[cfg(windows)]
    /// Open a window using WinAPI.
    WinApi,
    #[cfg(feature = "android_display")]
    /// The display buffer is backed by an Android surface. The surface is set via an AIDL service
    /// that the backend hosts. Currently, the AIDL service is registered to the service manager
    /// using the name given here. The entity holding the surface is expected to locate the service
    /// via this name, and pass the surface to it.
    Android(String),
}
1349
1350 impl DisplayBackend {
build( &self, #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>, #[cfg(windows)] gpu_display_wait_descriptor_ctrl: SendTube, ) -> std::result::Result<GpuDisplay, GpuDisplayError>1351 fn build(
1352 &self,
1353 #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
1354 #[cfg(windows)] gpu_display_wait_descriptor_ctrl: SendTube,
1355 ) -> std::result::Result<GpuDisplay, GpuDisplayError> {
1356 match self {
1357 #[cfg(any(target_os = "android", target_os = "linux"))]
1358 DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
1359 #[cfg(any(target_os = "android", target_os = "linux"))]
1360 DisplayBackend::X(display) => GpuDisplay::open_x(display.as_deref()),
1361 DisplayBackend::Stub => GpuDisplay::open_stub(),
1362 #[cfg(windows)]
1363 DisplayBackend::WinApi => match wndproc_thread.take() {
1364 Some(wndproc_thread) => GpuDisplay::open_winapi(
1365 wndproc_thread,
1366 /* win_metrics= */ None,
1367 gpu_display_wait_descriptor_ctrl,
1368 None,
1369 ),
1370 None => {
1371 error!("wndproc_thread is none");
1372 Err(GpuDisplayError::Allocate)
1373 }
1374 },
1375 #[cfg(feature = "android_display")]
1376 DisplayBackend::Android(service_name) => GpuDisplay::open_android(service_name),
1377 }
1378 }
1379 }
1380
/// The virtio-gpu device. Owns device-wide configuration and the handles used
/// to communicate with the worker thread that services the virtqueues.
pub struct Gpu {
    exit_evt_wrtube: SendTube,
    pub gpu_control_tube: Option<Tube>,
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    resource_bridges: Option<ResourceBridges>,
    event_devices: Option<Vec<EventDevice>>,
    // The following worker_* fields are `Some` only while a worker thread is
    // running (between `start_worker_thread` and `stop_worker_thread`).
    worker_suspend_evt: Option<Event>,
    worker_request_sender: Option<mpsc::Sender<WorkerRequest>>,
    worker_response_receiver: Option<mpsc::Receiver<anyhow::Result<WorkerResponse>>>,
    worker_state: WorkerState,
    worker_thread: Option<WorkerThread<()>>,
    display_backends: Vec<DisplayBackend>,
    display_params: Vec<GpuDisplayParameters>,
    // Set when the display configuration changes; reported to the guest via the
    // `events_read` field of the config space (see `get_config`).
    display_event: Arc<AtomicBool>,
    rutabaga_builder: RutabagaBuilder,
    pci_address: Option<PciAddress>,
    pci_bar_size: u64,
    external_blob: bool,
    fixed_blob_mapping: bool,
    rutabaga_component: RutabagaComponentType,
    #[cfg(windows)]
    wndproc_thread: Option<WindowProcedureThread>,
    base_features: u64,
    udmabuf: bool,
    rutabaga_server_descriptor: Option<SafeDescriptor>,
    #[cfg(windows)]
    /// Because the Windows GpuDisplay can't expose an epollfd, it has to inform the GPU worker
    /// which descriptors to add to its wait context. That's what this Tube is used for (it is
    /// provided to each display backend).
    gpu_display_wait_descriptor_ctrl_wr: SendTube,
    #[cfg(windows)]
    /// The GPU worker uses this Tube to receive the descriptors that should be added to its wait
    /// context.
    gpu_display_wait_descriptor_ctrl_rd: Option<RecvTube>,
    capset_mask: u64,
    #[cfg(any(target_os = "android", target_os = "linux"))]
    gpu_cgroup_path: Option<PathBuf>,
}
1419
1420 impl Gpu {
    /// Creates a new virtio-gpu device.
    ///
    /// The device is not usable until `on_device_sandboxed()` starts the worker
    /// thread and `activate()` hands it the queues and guest memory.
    pub fn new(
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: Vec<Tube>,
        display_backends: Vec<DisplayBackend>,
        gpu_parameters: &GpuParameters,
        rutabaga_server_descriptor: Option<SafeDescriptor>,
        event_devices: Vec<EventDevice>,
        base_features: u64,
        channels: &BTreeMap<String, PathBuf>,
        #[cfg(windows)] wndproc_thread: WindowProcedureThread,
        #[cfg(any(target_os = "android", target_os = "linux"))] gpu_cgroup_path: Option<&PathBuf>,
    ) -> Gpu {
        // Always expose at least one display; the first display's virtual size
        // seeds the rutabaga builder below.
        let mut display_params = gpu_parameters.display_params.clone();
        if display_params.is_empty() {
            display_params.push(Default::default());
        }
        let (display_width, display_height) = display_params[0].get_virtual_display_size();

        let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
        for (channel_name, path) in channels {
            match &channel_name[..] {
                // The unnamed channel is the Wayland channel.
                "" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
                }),
                "mojo" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
                }),
                _ => error!("unknown rutabaga channel"),
            }
        }

        let rutabaga_channels_opt = Some(rutabaga_channels);
        let component = match gpu_parameters.mode {
            GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
            #[cfg(feature = "virgl_renderer")]
            GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
            #[cfg(feature = "gfxstream")]
            GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
        };

        // Only allow virglrenderer to fork its own render server when explicitly requested.
        // Caller can enforce its own restrictions (e.g. not allowed when sandboxed) and set the
        // allow flag appropriately.
        let use_render_server = rutabaga_server_descriptor.is_some()
            || gpu_parameters.allow_implicit_render_server_exec;

        let rutabaga_wsi = match gpu_parameters.wsi {
            Some(GpuWsi::Vulkan) => RutabagaWsi::VulkanSwapchain,
            _ => RutabagaWsi::Surfaceless,
        };

        let rutabaga_builder = RutabagaBuilder::new(component, gpu_parameters.capset_mask)
            .set_display_width(display_width)
            .set_display_height(display_height)
            .set_rutabaga_channels(rutabaga_channels_opt)
            .set_use_egl(gpu_parameters.renderer_use_egl)
            .set_use_gles(gpu_parameters.renderer_use_gles)
            .set_use_glx(gpu_parameters.renderer_use_glx)
            .set_use_surfaceless(gpu_parameters.renderer_use_surfaceless)
            .set_use_vulkan(gpu_parameters.use_vulkan.unwrap_or_default())
            .set_wsi(rutabaga_wsi)
            .set_use_external_blob(gpu_parameters.external_blob)
            .set_use_system_blob(gpu_parameters.system_blob)
            .set_use_render_server(use_render_server)
            .set_renderer_features(gpu_parameters.renderer_features.clone());

        // Windows only: channel pair used by display backends to ask the worker
        // to watch additional descriptors (see the field docs on `Gpu`).
        #[cfg(windows)]
        let (gpu_display_wait_descriptor_ctrl_wr, gpu_display_wait_descriptor_ctrl_rd) =
            Tube::directional_pair().expect("failed to create wait descriptor control pair.");

        Gpu {
            exit_evt_wrtube,
            gpu_control_tube: Some(gpu_control_tube),
            mapper: Arc::new(Mutex::new(None)),
            resource_bridges: Some(ResourceBridges::new(resource_bridges)),
            event_devices: Some(event_devices),
            worker_request_sender: None,
            worker_response_receiver: None,
            worker_suspend_evt: None,
            worker_state: WorkerState::Inactive,
            worker_thread: None,
            display_backends,
            display_params,
            display_event: Arc::new(AtomicBool::new(false)),
            rutabaga_builder,
            pci_address: gpu_parameters.pci_address,
            pci_bar_size: gpu_parameters.pci_bar_size,
            external_blob: gpu_parameters.external_blob,
            fixed_blob_mapping: gpu_parameters.fixed_blob_mapping,
            rutabaga_component: component,
            #[cfg(windows)]
            wndproc_thread: Some(wndproc_thread),
            base_features,
            udmabuf: gpu_parameters.udmabuf,
            rutabaga_server_descriptor,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd: Some(gpu_display_wait_descriptor_ctrl_rd),
            capset_mask: gpu_parameters.capset_mask,
            #[cfg(any(target_os = "android", target_os = "linux"))]
            gpu_cgroup_path: gpu_cgroup_path.cloned(),
        }
    }
1528
1529 /// Initializes the internal device state so that it can begin processing virtqueues.
1530 ///
1531 /// Only used by vhost-user GPU.
initialize_frontend( &mut self, fence_state: Arc<Mutex<FenceState>>, fence_handler: RutabagaFenceHandler, mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>, ) -> Option<Frontend>1532 pub fn initialize_frontend(
1533 &mut self,
1534 fence_state: Arc<Mutex<FenceState>>,
1535 fence_handler: RutabagaFenceHandler,
1536 mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
1537 ) -> Option<Frontend> {
1538 let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
1539 to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
1540 });
1541 let rutabaga = self
1542 .rutabaga_builder
1543 .clone()
1544 .build(fence_handler, rutabaga_server_descriptor)
1545 .map_err(|e| error!("failed to build rutabaga {}", e))
1546 .ok()?;
1547
1548 let mut virtio_gpu = build(
1549 &self.display_backends,
1550 self.display_params.clone(),
1551 self.display_event.clone(),
1552 rutabaga,
1553 mapper,
1554 self.external_blob,
1555 self.fixed_blob_mapping,
1556 #[cfg(windows)]
1557 &mut self.wndproc_thread,
1558 self.udmabuf,
1559 #[cfg(windows)]
1560 self.gpu_display_wait_descriptor_ctrl_wr
1561 .try_clone()
1562 .expect("failed to clone wait context control channel"),
1563 )?;
1564
1565 for event_device in self.event_devices.take().expect("missing event_devices") {
1566 virtio_gpu
1567 .import_event_device(event_device)
1568 // We lost the `EventDevice`, so fail hard.
1569 .expect("failed to import event device");
1570 }
1571
1572 Some(Frontend::new(virtio_gpu, fence_state))
1573 }
1574
    // This is not invoked when running with vhost-user GPU.
    /// Spawns the GPU worker thread and blocks until it has finished its init
    /// phase (building rutabaga and, if enabled, the render server).
    fn start_worker_thread(&mut self) {
        let suspend_evt = Event::new().unwrap();
        let suspend_evt_copy = suspend_evt
            .try_clone()
            .context("error cloning suspend event")
            .unwrap();

        let exit_evt_wrtube = self
            .exit_evt_wrtube
            .try_clone()
            .context("error cloning exit tube")
            .unwrap();

        // The control tube and resource bridges are moved into the worker; they
        // are `None` on `self` from here on.
        let gpu_control_tube = self
            .gpu_control_tube
            .take()
            .context("gpu_control_tube is none")
            .unwrap();

        let resource_bridges = self
            .resource_bridges
            .take()
            .context("resource_bridges is none")
            .unwrap();

        let display_backends = self.display_backends.clone();
        let display_params = self.display_params.clone();
        let display_event = self.display_event.clone();
        let event_devices = self.event_devices.take().expect("missing event_devices");
        let external_blob = self.external_blob;
        let fixed_blob_mapping = self.fixed_blob_mapping;
        let udmabuf = self.udmabuf;

        #[cfg(windows)]
        let mut wndproc_thread = self.wndproc_thread.take();

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_wr = self
            .gpu_display_wait_descriptor_ctrl_wr
            .try_clone()
            .expect("failed to clone wait context ctrl channel");

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_rd = self
            .gpu_display_wait_descriptor_ctrl_rd
            .take()
            .expect("failed to take gpu_display_wait_descriptor_ctrl_rd");

        #[cfg(any(target_os = "android", target_os = "linux"))]
        let gpu_cgroup_path = self.gpu_cgroup_path.clone();

        let mapper = Arc::clone(&self.mapper);

        let rutabaga_builder = self.rutabaga_builder.clone();
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });

        // Used only to signal that the worker finished its init phase.
        let (init_finished_tx, init_finished_rx) = mpsc::channel();

        let (worker_request_sender, worker_request_receiver) = mpsc::channel();
        let (worker_response_sender, worker_response_receiver) = mpsc::channel();

        let worker_thread = WorkerThread::start("v_gpu", move |kill_evt| {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            if let Some(cgroup_path) = gpu_cgroup_path {
                move_task_to_cgroup(cgroup_path, base::gettid())
                    .expect("Failed to move v_gpu into requested cgroup");
            }

            let mut worker = Worker::new(
                rutabaga_builder,
                rutabaga_server_descriptor,
                display_backends,
                display_params,
                display_event,
                mapper,
                event_devices,
                external_blob,
                fixed_blob_mapping,
                udmabuf,
                worker_request_receiver,
                worker_response_sender,
                exit_evt_wrtube,
                gpu_control_tube,
                resource_bridges,
                suspend_evt_copy,
                kill_evt,
                #[cfg(windows)]
                wndproc_thread,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_rd,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_wr,
            )
            .expect("Failed to create virtio gpu worker thread");

            // Tell the parent thread that the init phase is complete.
            let _ = init_finished_tx.send(());

            worker.run()
        });

        self.worker_request_sender = Some(worker_request_sender);
        self.worker_response_receiver = Some(worker_response_receiver);
        self.worker_suspend_evt = Some(suspend_evt);
        self.worker_state = WorkerState::Inactive;
        self.worker_thread = Some(worker_thread);

        // Block until the worker finishes init (or panics, closing the channel).
        match init_finished_rx.recv() {
            Ok(()) => {}
            Err(mpsc::RecvError) => panic!("virtio-gpu worker thread init failed"),
        }
    }
1690
stop_worker_thread(&mut self)1691 fn stop_worker_thread(&mut self) {
1692 self.worker_request_sender.take();
1693 self.worker_response_receiver.take();
1694 self.worker_suspend_evt.take();
1695 if let Some(worker_thread) = self.worker_thread.take() {
1696 worker_thread.stop();
1697 }
1698 }
1699
get_config(&self) -> virtio_gpu_config1700 fn get_config(&self) -> virtio_gpu_config {
1701 let mut events_read = 0;
1702
1703 if self.display_event.load(Ordering::Relaxed) {
1704 events_read |= VIRTIO_GPU_EVENT_DISPLAY;
1705 }
1706
1707 let num_capsets = match self.capset_mask {
1708 0 => {
1709 match self.rutabaga_component {
1710 RutabagaComponentType::Rutabaga2D => 0,
1711 _ => {
1712 #[allow(unused_mut)]
1713 let mut num_capsets = 0;
1714
1715 // Three capsets for virgl_renderer
1716 #[cfg(feature = "virgl_renderer")]
1717 {
1718 num_capsets += 3;
1719 }
1720
1721 // One capset for gfxstream
1722 #[cfg(feature = "gfxstream")]
1723 {
1724 num_capsets += 1;
1725 }
1726
1727 num_capsets
1728 }
1729 }
1730 }
1731 _ => self.capset_mask.count_ones(),
1732 };
1733
1734 virtio_gpu_config {
1735 events_read: Le32::from(events_read),
1736 events_clear: Le32::from(0),
1737 num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32),
1738 num_capsets: Le32::from(num_capsets),
1739 }
1740 }
1741
1742 /// Send a request to exit the process to VMM.
send_exit_evt(&self) -> anyhow::Result<()>1743 pub fn send_exit_evt(&self) -> anyhow::Result<()> {
1744 self.exit_evt_wrtube
1745 .send::<VmEventType>(&VmEventType::Exit)
1746 .context("failed to send exit event")
1747 }
1748 }
1749
1750 impl VirtioDevice for Gpu {
keep_rds(&self) -> Vec<RawDescriptor>1751 fn keep_rds(&self) -> Vec<RawDescriptor> {
1752 let mut keep_rds = Vec::new();
1753
1754 // To find the RawDescriptor associated with stdout and stderr on Windows is difficult.
1755 // Resource bridges are used only for Wayland displays. There is also no meaningful way
1756 // casting the underlying DMA buffer wrapped in File to a copyable RawDescriptor.
1757 // TODO(davidriley): Remove once virgl has another path to include
1758 // debugging logs.
1759 #[cfg(any(target_os = "android", target_os = "linux"))]
1760 if cfg!(debug_assertions) {
1761 keep_rds.push(libc::STDOUT_FILENO);
1762 keep_rds.push(libc::STDERR_FILENO);
1763 }
1764
1765 if let Some(ref mapper) = *self.mapper.lock() {
1766 if let Some(descriptor) = mapper.as_raw_descriptor() {
1767 keep_rds.push(descriptor);
1768 }
1769 }
1770
1771 if let Some(ref rutabaga_server_descriptor) = self.rutabaga_server_descriptor {
1772 keep_rds.push(rutabaga_server_descriptor.as_raw_descriptor());
1773 }
1774
1775 keep_rds.push(self.exit_evt_wrtube.as_raw_descriptor());
1776
1777 if let Some(gpu_control_tube) = &self.gpu_control_tube {
1778 keep_rds.push(gpu_control_tube.as_raw_descriptor());
1779 }
1780
1781 if let Some(resource_bridges) = &self.resource_bridges {
1782 resource_bridges.append_raw_descriptors(&mut keep_rds);
1783 }
1784
1785 for event_device in self.event_devices.iter().flatten() {
1786 keep_rds.push(event_device.as_raw_descriptor());
1787 }
1788
1789 keep_rds
1790 }
1791
    /// Identifies this device as a virtio-gpu to the transport.
    fn device_type(&self) -> DeviceType {
        DeviceType::Gpu
    }
1795
    /// Maximum sizes for the control and cursor queues.
    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }
1799
features(&self) -> u641800 fn features(&self) -> u64 {
1801 let mut virtio_gpu_features = 1 << VIRTIO_GPU_F_EDID;
1802
1803 // If a non-2D component is specified, enable 3D features. It is possible to run display
1804 // contexts without 3D backend (i.e, gfxstream / virglrender), so check for that too.
1805 if self.rutabaga_component != RutabagaComponentType::Rutabaga2D || self.capset_mask != 0 {
1806 virtio_gpu_features |= 1 << VIRTIO_GPU_F_VIRGL
1807 | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1808 | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
1809 | 1 << VIRTIO_GPU_F_CONTEXT_INIT
1810 | 1 << VIRTIO_GPU_F_EDID;
1811
1812 if self.udmabuf {
1813 virtio_gpu_features |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1814 }
1815
1816 // New experimental/unstable feature, not upstreamed.
1817 // Safe to enable because guest must explicitly opt-in.
1818 virtio_gpu_features |= 1 << VIRTIO_GPU_F_FENCE_PASSING;
1819 }
1820
1821 self.base_features | virtio_gpu_features
1822 }
1823
ack_features(&mut self, value: u64)1824 fn ack_features(&mut self, value: u64) {
1825 let _ = value;
1826 }
1827
    /// Serves config-space reads from a freshly generated copy of the config.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        copy_config(data, 0, self.get_config().as_bytes(), offset);
    }
1831
write_config(&mut self, offset: u64, data: &[u8])1832 fn write_config(&mut self, offset: u64, data: &[u8]) {
1833 let mut cfg = self.get_config();
1834 copy_config(cfg.as_bytes_mut(), offset, data, 0);
1835 if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
1836 self.display_event.store(false, Ordering::Relaxed);
1837 }
1838 }
1839
    /// Starts the worker thread early, before `activate()`.
    fn on_device_sandboxed(&mut self) {
        // Unlike most Virtio devices which start their worker thread in activate(),
        // the Gpu's worker thread is started earlier here so that rutabaga and the
        // underlying render server have a chance to initialize before the guest OS
        // starts. This is needed because the Virtio GPU kernel module has a timeout
        // for some calls during initialization and some host GPU drivers have been
        // observed to be extremely slow to initialize on fresh GCE instances. The
        // entire worker thread is started here (as opposed to just initializing
        // rutabaga and the underlying render server) as OpenGL based renderers may
        // expect to be initialized on the same thread that later processes commands.
        self.start_worker_thread();
    }
1852
    /// Hands queues, guest memory, and the interrupt to the already-running
    /// worker thread (started in `on_device_sandboxed`) and waits for it to
    /// acknowledge activation.
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

        // Queue 0 is the control queue (shared with the fence handler); queue 1
        // is the cursor queue.
        let ctrl_queue = SharedQueueReader::new(queues.remove(&0).unwrap());
        let cursor_queue = LocalQueueReader::new(queues.remove(&1).unwrap());

        self.worker_request_sender
            .as_ref()
            .context("worker thread missing on activate?")?
            .send(WorkerRequest::Activate(WorkerActivateRequest {
                resources: GpuActivationResources {
                    mem,
                    interrupt,
                    ctrl_queue,
                    cursor_queue,
                },
            }))
            .map_err(|e| anyhow!("failed to send virtio gpu worker activate request: {:?}", e))?;

        // Block until the worker responds, recording the resulting worker state
        // for later suspend/snapshot operations.
        self.worker_response_receiver
            .as_ref()
            .context("worker thread missing on activate?")?
            .recv()
            .inspect(|_| self.worker_state = WorkerState::Active)
            .inspect_err(|_| self.worker_state = WorkerState::Error)
            .context("failed to receive response for virtio gpu worker resume request")??;

        Ok(())
    }
1893
    /// Returns the PCI address requested for this device, if any was configured.
    fn pci_address(&self) -> Option<PciAddress> {
        self.pci_address
    }
1897
get_shared_memory_region(&self) -> Option<SharedMemoryRegion>1898 fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
1899 Some(SharedMemoryRegion {
1900 id: VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
1901 length: self.pci_bar_size,
1902 })
1903 }
1904
set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>)1905 fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
1906 self.mapper.lock().replace(mapper);
1907 }
1908
    /// Whether shared memory descriptors should be exposed through the vIOMMU;
    /// currently disabled whenever fixed blob mapping is in use.
    fn expose_shmem_descriptors_with_viommu(&self) -> bool {
        // TODO(b/323368701): integrate with fixed_blob_mapping so this can always return true.
        !self.fixed_blob_mapping
    }
1913
get_shared_memory_prepare_type(&mut self) -> SharedMemoryPrepareType1914 fn get_shared_memory_prepare_type(&mut self) -> SharedMemoryPrepareType {
1915 if self.fixed_blob_mapping {
1916 let cache_type = if cfg!(feature = "noncoherent-dma") {
1917 MemCacheType::CacheNonCoherent
1918 } else {
1919 MemCacheType::CacheCoherent
1920 };
1921 SharedMemoryPrepareType::SingleMappingOnFirst(cache_type)
1922 } else {
1923 SharedMemoryPrepareType::DynamicPerMapping
1924 }
1925 }
1926
1927 // Notes on sleep/wake/snapshot/restore functionality.
1928 //
1929 // * Only 2d mode is supported so far.
1930 // * We only snapshot the state relevant to the virtio-gpu 2d mode protocol (i.e. scanouts,
1931 // resources, fences).
1932 // * The GpuDisplay is recreated from scratch, we don't want to snapshot the state of a
1933 // Wayland socket (for example).
1934 // * No state about pending virtio requests needs to be snapshotted because the 2d backend
1935 // completes them synchronously.
virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>>1936 fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
1937 match self.worker_state {
1938 WorkerState::Error => {
1939 return Err(anyhow!(
1940 "failed to sleep virtio gpu worker which is in error state"
1941 ));
1942 }
1943 WorkerState::Inactive => {
1944 return Ok(None);
1945 }
1946 _ => (),
1947 };
1948
1949 if let (
1950 Some(worker_request_sender),
1951 Some(worker_response_receiver),
1952 Some(worker_suspend_evt),
1953 ) = (
1954 &self.worker_request_sender,
1955 &self.worker_response_receiver,
1956 &self.worker_suspend_evt,
1957 ) {
1958 worker_request_sender
1959 .send(WorkerRequest::Suspend)
1960 .map_err(|e| {
1961 anyhow!(
1962 "failed to send suspend request to virtio gpu worker: {:?}",
1963 e
1964 )
1965 })?;
1966
1967 worker_suspend_evt
1968 .signal()
1969 .context("failed to signal virtio gpu worker suspend event")?;
1970
1971 let response = worker_response_receiver
1972 .recv()
1973 .inspect(|_| self.worker_state = WorkerState::Inactive)
1974 .inspect_err(|_| self.worker_state = WorkerState::Error)
1975 .context("failed to receive response for virtio gpu worker suspend request")??;
1976
1977 worker_suspend_evt
1978 .reset()
1979 .context("failed to reset virtio gpu worker suspend event")?;
1980
1981 match response {
1982 WorkerResponse::Suspend(deactivation_resources) => Ok(deactivation_resources
1983 .queues
1984 .map(|q| q.into_iter().enumerate().collect())),
1985 _ => {
1986 panic!("unexpected response from virtio gpu worker sleep request");
1987 }
1988 }
1989 } else {
1990 Err(anyhow!("virtio gpu worker not available for sleep"))
1991 }
1992 }
1993
virtio_wake( &mut self, queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>, ) -> anyhow::Result<()>1994 fn virtio_wake(
1995 &mut self,
1996 queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
1997 ) -> anyhow::Result<()> {
1998 match self.worker_state {
1999 WorkerState::Error => {
2000 return Err(anyhow!(
2001 "failed to wake virtio gpu worker which is in error state"
2002 ));
2003 }
2004 WorkerState::Active => {
2005 return Ok(());
2006 }
2007 _ => (),
2008 };
2009
2010 match queues_state {
2011 None => Ok(()),
2012 Some((mem, interrupt, queues)) => {
2013 // TODO(khei): activate is just what we want at the moment, but we should probably
2014 // move it into a "start workers" function to make it obvious that it isn't
2015 // strictly used for activate events.
2016 self.activate(mem, interrupt, queues)?;
2017 Ok(())
2018 }
2019 }
2020 }
2021
virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value>2022 fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
2023 match self.worker_state {
2024 WorkerState::Error => {
2025 return Err(anyhow!(
2026 "failed to snapshot virtio gpu worker which is in error state"
2027 ));
2028 }
2029 WorkerState::Active => {
2030 return Err(anyhow!(
2031 "failed to snapshot virtio gpu worker which is in active state"
2032 ));
2033 }
2034 _ => (),
2035 };
2036
2037 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2038 (&self.worker_request_sender, &self.worker_response_receiver)
2039 {
2040 worker_request_sender
2041 .send(WorkerRequest::Snapshot)
2042 .map_err(|e| {
2043 anyhow!(
2044 "failed to send snapshot request to virtio gpu worker: {:?}",
2045 e
2046 )
2047 })?;
2048
2049 match worker_response_receiver
2050 .recv()
2051 .inspect_err(|_| self.worker_state = WorkerState::Error)
2052 .context("failed to receive response for virtio gpu worker suspend request")??
2053 {
2054 WorkerResponse::Snapshot(snapshot) => Ok(serde_json::to_value(snapshot)?),
2055 _ => {
2056 panic!("unexpected response from virtio gpu worker sleep request");
2057 }
2058 }
2059 } else {
2060 Err(anyhow!("virtio gpu worker not available for snapshot"))
2061 }
2062 }
2063
virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()>2064 fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
2065 match self.worker_state {
2066 WorkerState::Error => {
2067 return Err(anyhow!(
2068 "failed to restore virtio gpu worker which is in error state"
2069 ));
2070 }
2071 WorkerState::Active => {
2072 return Err(anyhow!(
2073 "failed to restore virtio gpu worker which is in active state"
2074 ));
2075 }
2076 _ => (),
2077 };
2078
2079 let snapshot: WorkerSnapshot = serde_json::from_value(data)?;
2080
2081 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2082 (&self.worker_request_sender, &self.worker_response_receiver)
2083 {
2084 worker_request_sender
2085 .send(WorkerRequest::Restore(snapshot))
2086 .map_err(|e| {
2087 anyhow!(
2088 "failed to send suspend request to virtio gpu worker: {:?}",
2089 e
2090 )
2091 })?;
2092
2093 let response = worker_response_receiver
2094 .recv()
2095 .inspect_err(|_| self.worker_state = WorkerState::Error)
2096 .context("failed to receive response for virtio gpu worker suspend request")??;
2097
2098 match response {
2099 WorkerResponse::Ok => Ok(()),
2100 _ => {
2101 panic!("unexpected response from virtio gpu worker sleep request");
2102 }
2103 }
2104 } else {
2105 Err(anyhow!("virtio gpu worker not available for restore"))
2106 }
2107 }
2108
    /// Resets the device by tearing down the worker thread; always succeeds.
    fn reset(&mut self) -> anyhow::Result<()> {
        self.stop_worker_thread();
        Ok(())
    }
2113 }
2114
impl Drop for Gpu {
    fn drop(&mut self) {
        // Best-effort shutdown of the worker thread; there is no way to surface an
        // error from drop, so the result is intentionally ignored.
        let _ = self.reset();
    }
}
2120
/// This struct takes the ownership of resource bridges and tracks which ones should be processed.
struct ResourceBridges {
    // The owned resource bridge tubes; their order is stable, so an index identifies a bridge.
    resource_bridges: Vec<Tube>,
    // Parallel to `resource_bridges`: `should_process[i]` marks bridge `i` for handling on the
    // next `process_resource_bridges()` call.
    should_process: Vec<bool>,
}
2126
2127 impl ResourceBridges {
new(resource_bridges: Vec<Tube>) -> Self2128 pub fn new(resource_bridges: Vec<Tube>) -> Self {
2129 #[cfg(windows)]
2130 assert!(
2131 resource_bridges.is_empty(),
2132 "resource bridges are not supported on Windows"
2133 );
2134
2135 let mut resource_bridges = Self {
2136 resource_bridges,
2137 should_process: Default::default(),
2138 };
2139 resource_bridges.reset_should_process();
2140 resource_bridges
2141 }
2142
2143 // Appends raw descriptors of all resource bridges to the given vector.
append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>)2144 pub fn append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>) {
2145 for bridge in &self.resource_bridges {
2146 rds.push(bridge.as_raw_descriptor());
2147 }
2148 }
2149
2150 /// Adds all resource bridges to WaitContext.
add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>)2151 pub fn add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>) {
2152 for (index, bridge) in self.resource_bridges.iter().enumerate() {
2153 if let Err(e) = wait_ctx.add(bridge, WorkerToken::ResourceBridge { index }) {
2154 error!("failed to add resource bridge to WaitContext: {}", e);
2155 }
2156 }
2157 }
2158
2159 /// Marks that the resource bridge at the given index should be processed when
2160 /// `process_resource_bridges()` is called.
set_should_process(&mut self, index: usize)2161 pub fn set_should_process(&mut self, index: usize) {
2162 self.should_process[index] = true;
2163 }
2164
2165 /// Processes all resource bridges that have been marked as should be processed. The markings
2166 /// will be cleared before returning. Faulty resource bridges will be removed from WaitContext.
process_resource_bridges( &mut self, state: &mut Frontend, wait_ctx: &mut WaitContext<WorkerToken>, )2167 pub fn process_resource_bridges(
2168 &mut self,
2169 state: &mut Frontend,
2170 wait_ctx: &mut WaitContext<WorkerToken>,
2171 ) {
2172 for (bridge, &should_process) in self.resource_bridges.iter().zip(&self.should_process) {
2173 if should_process {
2174 if let Err(e) = state.process_resource_bridge(bridge) {
2175 error!("Failed to process resource bridge: {:#}", e);
2176 error!("Removing that resource bridge from the wait context.");
2177 wait_ctx.delete(bridge).unwrap_or_else(|e| {
2178 error!("Failed to remove faulty resource bridge: {:#}", e)
2179 });
2180 }
2181 }
2182 }
2183 self.reset_should_process();
2184 }
2185
reset_should_process(&mut self)2186 fn reset_should_process(&mut self) {
2187 self.should_process.clear();
2188 self.should_process
2189 .resize(self.resource_bridges.len(), false);
2190 }
2191 }
2192