// xref: /aosp_15_r20/external/crosvm/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use argh::FromArgs;
use base::error;
use base::info;
use base::Event;
use base::FromRawDescriptor;
use base::RawDescriptor;
use base::SafeDescriptor;
use base::SendTube;
use base::StreamChannel;
use base::Tube;
use broker_ipc::common_child_setup;
use broker_ipc::CommonChildStartupArgs;
use cros_async::AsyncTube;
use cros_async::AsyncWrapper;
use cros_async::EventAsync;
use cros_async::Executor;
use gpu_display::EventDevice;
use gpu_display::WindowProcedureThread;
use gpu_display::WindowProcedureThreadBuilder;
use hypervisor::ProtectionType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
use tube_transporter::TubeToken;
use vm_control::gpu::GpuControlCommand;
use vm_control::gpu::GpuControlResult;

use crate::virtio;
use crate::virtio::gpu;
use crate::virtio::gpu::ProcessDisplayResult;
use crate::virtio::vhost::user::device::gpu::GpuBackend;
use crate::virtio::vhost::user::device::handler::sys::windows::read_from_tube_transporter;
use crate::virtio::vhost::user::device::handler::sys::windows::run_handler;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::Gpu;
use crate::virtio::GpuDisplayParameters;
use crate::virtio::GpuParameters;
use crate::virtio::Interrupt;

pub mod generic;
pub use generic as product;

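/// Waits on the display event and processes pending display events until the display errors out
/// or requests to close (in which case an exit event is sent to the VMM).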
async fn run_display(
    display: EventAsync,
    state: Rc<RefCell<gpu::Frontend>>,
    gpu: Rc<RefCell<gpu::Gpu>>,
) {
    loop {
        if let Err(e) = display.next_val().await {
            error!(
                "Failed to wait for display context to become readable: {}",
                e
            );
            break;
        }

        match state.borrow_mut().process_display() {
            ProcessDisplayResult::Error(e) => {
                error!("Failed to process display events: {}", e);
                break;
            }
            ProcessDisplayResult::CloseRequested => {
                let res = gpu.borrow().send_exit_evt();
                if res.is_err() {
                    error!("Failed to send exit event: {:?}", res);
                }
                break;
            }
            ProcessDisplayResult::Success => {}
        }
    }
}

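/// Receives `GpuControlCommand`s over `gpu_control_tube`, applies them to the GPU frontend, and
/// sends the result back, signaling a config change interrupt when the set of displays is
/// updated.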
async fn run_gpu_control_command_handler(
    mut gpu_control_tube: AsyncTube,
    state: Rc<RefCell<gpu::Frontend>>,
    interrupt: Interrupt,
) {
    'wait: loop {
        let req = match gpu_control_tube.next::<GpuControlCommand>().await {
            Ok(req) => req,
            Err(e) => {
                error!("GPU control socket failed to recv: {:?}", e);
                break 'wait;
            }
        };

        let resp = state.borrow_mut().process_gpu_control_command(req);

        if let GpuControlResult::DisplaysUpdated = resp {
            info!("Signaling display config change");
            interrupt.signal_config_changed();
        }

        if let Err(e) = gpu_control_tube.send(resp).await {
            error!("Display control socket failed to send: {}", e);
            break 'wait;
        }
    }
}

impl GpuBackend {
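    /// Spawns the Windows-specific worker tasks (the display loop and the GPU control command
    /// handler) on the backend's executor and hands them to `platform_worker_tx` so the backend
    /// can keep them running.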
    pub fn start_platform_workers(&mut self, interrupt: Interrupt) -> anyhow::Result<()> {
        let state = self
            .state
            .as_ref()
            .context("frontend state wasn't set")?
            .clone();

        // Start handling the display.
        // SAFETY:
        // Safe because the raw descriptor is valid and refers to an event.
        let display = unsafe {
            EventAsync::clone_raw_without_reset(&*state.borrow_mut().display().borrow(), &self.ex)
        }
        .context("failed to clone inner WaitContext for gpu display")?;

        let task = self
            .ex
            .spawn_local(run_display(display, state.clone(), self.gpu.clone()));
        self.platform_worker_tx
            .unbounded_send(task)
            .context("sending the run_display task for the initial display")?;

        let task = self.ex.spawn_local(run_gpu_control_command_handler(
            AsyncTube::new(
                &self.ex,
                self.gpu
                    .borrow_mut()
                    .gpu_control_tube
                    .take()
                    .expect("gpu control tube must exist"),
            )
            .expect("gpu control tube creation"),
            state,
            interrupt,
        ));
        self.platform_worker_tx
            .unbounded_send(task)
            .context("sending the run_gpu_control_command_handler task")?;

        Ok(())
    }
}

#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu", description = "")]
pub struct Options {
    #[argh(
        option,
        description = "pipe handle end for Tube Transporter",
        arg_name = "HANDLE"
    )]
    bootstrap: usize,
}

/// Main process end for input event devices.
#[derive(Deserialize, Serialize)]
pub struct InputEventVmmConfig {
    // Pipes to receive input events on.
    pub multi_touch_pipes: Vec<StreamChannel>,
    pub mouse_pipes: Vec<StreamChannel>,
    pub keyboard_pipes: Vec<StreamChannel>,
}

/// Backend process end for input event devices.
#[derive(Deserialize, Serialize)]
pub struct InputEventBackendConfig {
    // Event devices to send input events to.
    pub event_devices: Vec<EventDevice>,
}

/// Configuration for running input event devices, split into a part sent to the main VMM and a
/// part sent to the window thread (either the main process or a vhost-user process).
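///
/// A minimal sketch of how the two halves might be assembled (hypothetical, with empty pipe and
/// event-device lists):
///
/// ```ignore
/// let split = InputEventSplitConfig {
///     // Sent to the backend (window) process; `None` if there is no backend process.
///     backend_config: Some(InputEventBackendConfig { event_devices: vec![] }),
///     // Kept by the main VMM process.
///     vmm_config: InputEventVmmConfig {
///         multi_touch_pipes: vec![],
///         mouse_pipes: vec![],
///         keyboard_pipes: vec![],
///     },
/// };
/// ```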
#[derive(Deserialize, Serialize)]
pub struct InputEventSplitConfig {
    // Config sent to the backend.
    pub backend_config: Option<InputEventBackendConfig>,
    // Config sent to the main process.
    pub vmm_config: InputEventVmmConfig,
}

/// Main process end for a GPU device.
#[derive(Deserialize, Serialize)]
pub struct GpuVmmConfig {
    // Tube for setting up the vhost-user connection. May not exist if not using vhost-user.
    pub main_vhost_user_tube: Option<Tube>,
    // A tube to forward GPU control commands in the main process.
    pub gpu_control_host_tube: Option<Tube>,
    pub product_config: product::GpuVmmConfig,
}

/// Config arguments passed through the bootstrap Tube from the broker to the GPU backend
/// process.
#[derive(Deserialize, Serialize)]
pub struct GpuBackendConfig {
    // Tube for setting up the vhost-user connection. May not exist if not using vhost-user.
    pub device_vhost_user_tube: Option<Tube>,
    // An event for an incoming exit request.
    pub exit_event: Event,
    // A tube to send an exit request.
    pub exit_evt_wrtube: SendTube,
    // A tube to handle GPU control commands in the GPU device.
    pub gpu_control_device_tube: Tube,
    // GPU parameters.
    pub params: GpuParameters,
    // Product-related configurations.
    pub product_config: product::GpuBackendConfig,
}

#[derive(Deserialize, Serialize)]
pub struct WindowProcedureThreadVmmConfig {
    pub product_config: product::WindowProcedureThreadVmmConfig,
}

#[derive(Deserialize, Serialize)]
pub struct WindowProcedureThreadSplitConfig {
    // This is the config sent to the backend process.
    pub wndproc_thread_builder: Option<WindowProcedureThreadBuilder>,
    // Config sent to the main process.
    pub vmm_config: WindowProcedureThreadVmmConfig,
}

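/// Entry point for the vhost-user GPU device process: receives its configuration over the
/// bootstrap Tube (via the Tube Transporter handle in `opts`), performs common child-process
/// setup, starts the window procedure thread, and then runs the GPU device worker.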
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    cros_tracing::init();

    let raw_transport_tube = opts.bootstrap as RawDescriptor;

    let mut tubes = read_from_tube_transporter(raw_transport_tube)?;

    let bootstrap_tube = tubes.get_tube(TubeToken::Bootstrap)?;

    let startup_args: CommonChildStartupArgs = bootstrap_tube.recv::<CommonChildStartupArgs>()?;
    let _child_cleanup = common_child_setup(startup_args)?;

    let (mut config, input_event_backend_config, wndproc_thread_builder): (
        GpuBackendConfig,
        InputEventBackendConfig,
        WindowProcedureThreadBuilder,
    ) = bootstrap_tube
        .recv()
        .context("failed to parse GPU backend config from bootstrap tube")?;

    // TODO(b/213170185): Uncomment once sandbox is upstreamed.
    // if sandbox::is_sandbox_target() {
    //     sandbox::TargetServices::get()
    //         .expect("failed to get target services")
    //         .unwrap()
    //         .lower_token();
    // }

    let wndproc_thread = wndproc_thread_builder
        .start_thread()
        .context("Failed to create window procedure thread for vhost GPU")?;

    run_gpu_device_worker(
        config,
        input_event_backend_config.event_devices,
        wndproc_thread,
    )
}

/// Run the GPU device worker.
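///
/// Builds the virtio `Gpu` device from the received config, wraps it in a `GpuBackend`, and runs
/// the vhost-user request handler on a local executor until the device exits.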
pub fn run_gpu_device_worker(
    mut config: GpuBackendConfig,
    event_devices: Vec<EventDevice>,
    wndproc_thread: WindowProcedureThread,
) -> anyhow::Result<()> {
    let vhost_user_tube = config
        .device_vhost_user_tube
        .expect("vhost-user gpu tube must be set");

    if config.params.display_params.is_empty() {
        config
            .params
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let display_backends = vec![virtio::DisplayBackend::WinApi];

    let mut gpu_params = config.params.clone();

    // Fallback for when external_blob is not available on the machine. Currently always off.
    gpu_params.system_blob = false;

    let base_features = virtio::base_features(ProtectionType::Unprotected);

    let gpu = Rc::new(RefCell::new(Gpu::new(
        config.exit_evt_wrtube,
        config.gpu_control_device_tube,
        /* resource_bridges= */ Vec::new(),
        display_backends,
        &gpu_params,
        /* render_server_descriptor */ None,
        event_devices,
        base_features,
        /* channels= */ &Default::default(),
        wndproc_thread,
    )));

    let ex = Executor::new().context("failed to create executor")?;

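    // Channel used by `start_platform_workers()` to hand spawned platform tasks (the display loop
    // and the GPU control handler) back to the backend, which holds the receiving end.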
    let (platform_worker_tx, platform_worker_rx) = futures::channel::mpsc::unbounded();
    let backend = GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges: Default::default(),
        state: None,
        fence_state: Default::default(),
        queue_workers: Default::default(),
        platform_worker_tx,
        platform_worker_rx,
        shmem_mapper: Arc::new(Mutex::new(None)),
    };

    let handler = DeviceRequestHandler::new(backend);

    info!("vhost-user gpu device ready, starting run loop...");

    // Run until the backend is finished.
    if let Err(e) = ex.run_until(run_handler(
        Box::new(handler),
        vhost_user_tube,
        config.exit_event,
        &ex,
    )) {
        bail!("error occurred: {}", e);
    }

    // Process any tasks from the backend's destructor.
    Ok(ex.run_until(async {})?)
}
347