// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2019-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

//! Traits for vhost user backend servers to implement virtio data plane services.
//!
//! This module defines two traits for vhost user backend servers to implement virtio data plane
//! services. The only difference between the two traits is mutability. The [VhostUserBackend]
//! trait is designed with interior mutability, so the implementor may choose a suitable way to
//! protect itself from concurrent accesses. The [VhostUserBackendMut] trait is designed without
//! interior mutability, and an implementation of:
//! ```ignore
//! impl<T: VhostUserBackendMut> VhostUserBackend for RwLock<T> { }
//! ```
//! is provided for convenience.
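//!
//! For example (a hypothetical sketch, assuming a `MyBackend` type that implements
//! [VhostUserBackendMut]), a backend can then be shared across worker threads as:
//! ```ignore
//! // `RwLock<MyBackend>` implements `VhostUserBackend` through the blanket impl above.
//! let backend = Arc::new(RwLock::new(MyBackend::new()));
//! ```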
17 //!
18 //! [VhostUserBackend]: trait.VhostUserBackend.html
19 //! [VhostUserBackendMut]: trait.VhostUserBackendMut.html
20 
21 use std::io::Result;
22 use std::ops::Deref;
23 use std::sync::{Arc, Mutex, RwLock};
24 
25 use vhost::vhost_user::message::VhostUserProtocolFeatures;
26 use vhost::vhost_user::Slave;
27 use vm_memory::bitmap::Bitmap;
28 use vmm_sys_util::epoll::EventSet;
29 use vmm_sys_util::eventfd::EventFd;
30 
31 use super::vring::VringT;
32 use super::GM;
33 
34 /// Trait with interior mutability for vhost user backend servers to implement concrete services.
///
/// To support multi-threading and asynchronous IO, we enforce a `Send + Sync` bound.
pub trait VhostUserBackend<V, B = ()>: Send + Sync
where
    V: VringT<GM<B>>,
    B: Bitmap + 'static,
{
    /// Get the number of queues supported.
    fn num_queues(&self) -> usize;

    /// Get the maximum queue size supported.
    fn max_queue_size(&self) -> usize;

    /// Get the available virtio features.
    fn features(&self) -> u64;

    /// Set the acknowledged virtio features.
    fn acked_features(&self, _features: u64) {}

    /// Get the available vhost protocol features.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Enable or disable the virtio EVENT_IDX feature.
    fn set_event_idx(&self, enabled: bool);

    /// Get the virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
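    ///
    /// A hypothetical sketch (assuming the backend keeps its config space in a `Vec<u8>` field
    /// named `config`):
    /// ```ignore
    /// fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
    ///     // Return an empty buffer when the requested range is out of bounds.
    ///     let start = offset as usize;
    ///     self.config
    ///         .get(start..start.saturating_add(size as usize))
    ///         .map_or_else(Vec::new, |slice| slice.to_vec())
    /// }
    /// ```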
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Set the virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_config(&self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Update guest memory regions.
    fn update_memory(&self, mem: GM<B>) -> Result<()>;

    /// Set the handler for communicating with the master over the slave communication channel.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_slave_req_fd(&self, _slave: Slave) {}

    /// Get the mapping from queue index to worker thread index.
    ///
    /// Each returned value is a bitmask of queue indices handled by the corresponding worker
    /// thread. A return value of [0b11, 0b1100, 0b1111_0000] means: the first two queues are
    /// handled by worker thread 0, the following two queues by worker thread 1, and the last
    /// four queues by worker thread 2.
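    ///
    /// A hypothetical sketch dedicating one worker thread to each of two queues:
    /// ```ignore
    /// fn queues_per_thread(&self) -> Vec<u64> {
    ///     // Thread 0 handles queue 0; thread 1 handles queue 1.
    ///     vec![0b01, 0b10]
    /// }
    /// ```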
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// Provide an optional exit EventFd for the specified worker thread.
    ///
    /// If an `EventFd` is returned, it will be monitored for IO events by using epoll. When the
    /// returned EventFd is written to, the worker thread will exit.
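    ///
    /// A minimal sketch (assuming the backend stores an `EventFd` in a hypothetical
    /// `exit_event_fd` field):
    /// ```ignore
    /// fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
    ///     // Hand out a duplicate so every worker thread can watch the same event.
    ///     self.exit_event_fd.try_clone().ok()
    /// }
    /// ```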
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle IO events for backend-registered file descriptors.
    ///
    /// This function gets called if the backend registered some additional listeners onto specific
    /// file descriptors. The library can handle virtqueues on its own, but does not know what to
    /// do with events happening on custom listeners.
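    ///
    /// A minimal sketch (the `process_rx`/`process_tx` helpers are hypothetical and would pop
    /// and process descriptors from the vrings):
    /// ```ignore
    /// fn handle_event(
    ///     &self,
    ///     device_event: u16,
    ///     evset: EventSet,
    ///     vrings: &[V],
    ///     _thread_id: usize,
    /// ) -> Result<bool> {
    ///     assert_eq!(evset, EventSet::IN);
    ///     match device_event {
    ///         0 => self.process_rx(&vrings[0]),
    ///         1 => self.process_tx(&vrings[1]),
    ///         _ => Err(std::io::Error::from(std::io::ErrorKind::InvalidData)),
    ///     }
    /// }
    /// ```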
    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[V],
        thread_id: usize,
    ) -> Result<bool>;
}

/// Trait without interior mutability for vhost user backend servers to implement concrete services.
pub trait VhostUserBackendMut<V, B = ()>: Send + Sync
where
    V: VringT<GM<B>>,
    B: Bitmap + 'static,
{
    /// Get the number of queues supported.
    fn num_queues(&self) -> usize;

    /// Get the maximum queue size supported.
    fn max_queue_size(&self) -> usize;

    /// Get the available virtio features.
    fn features(&self) -> u64;

    /// Set the acknowledged virtio features.
    fn acked_features(&mut self, _features: u64) {}

    /// Get the available vhost protocol features.
    fn protocol_features(&self) -> VhostUserProtocolFeatures;

    /// Enable or disable the virtio EVENT_IDX feature.
    fn set_event_idx(&mut self, enabled: bool);

    /// Get the virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn get_config(&self, _offset: u32, _size: u32) -> Vec<u8> {
        Vec::new()
    }

    /// Set the virtio device configuration.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_config(&mut self, _offset: u32, _buf: &[u8]) -> Result<()> {
        Ok(())
    }

    /// Update guest memory regions.
    fn update_memory(&mut self, mem: GM<B>) -> Result<()>;

    /// Set the handler for communicating with the master over the slave communication channel.
    ///
    /// A default implementation is provided as we cannot expect all backends to implement this
    /// function.
    fn set_slave_req_fd(&mut self, _slave: Slave) {}

    /// Get the mapping from queue index to worker thread index.
    ///
    /// Each returned value is a bitmask of queue indices handled by the corresponding worker
    /// thread. A return value of [0b11, 0b1100, 0b1111_0000] means: the first two queues are
    /// handled by worker thread 0, the following two queues by worker thread 1, and the last
    /// four queues by worker thread 2.
    fn queues_per_thread(&self) -> Vec<u64> {
        vec![0xffff_ffff]
    }

    /// Provide an optional exit EventFd for the specified worker thread.
    ///
    /// If an `EventFd` is returned, it will be monitored for IO events by using epoll. When the
    /// returned EventFd is written to, the worker thread will exit.
    fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
        None
    }

    /// Handle IO events for backend-registered file descriptors.
    ///
    /// This function gets called if the backend registered some additional listeners onto specific
    /// file descriptors. The library can handle virtqueues on its own, but does not know what to
    /// do with events happening on custom listeners.
    fn handle_event(
        &mut self,
        device_event: u16,
        evset: EventSet,
        vrings: &[V],
        thread_id: usize,
    ) -> Result<bool>;
}

impl<T: VhostUserBackend<V, B>, V, B> VhostUserBackend<V, B> for Arc<T>
where
    V: VringT<GM<B>>,
    B: Bitmap + 'static,
{
    fn num_queues(&self) -> usize {
        self.deref().num_queues()
    }

    fn max_queue_size(&self) -> usize {
        self.deref().max_queue_size()
    }

    fn features(&self) -> u64 {
        self.deref().features()
    }

    fn acked_features(&self, features: u64) {
        self.deref().acked_features(features)
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        self.deref().protocol_features()
    }

    fn set_event_idx(&self, enabled: bool) {
        self.deref().set_event_idx(enabled)
    }

    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
        self.deref().get_config(offset, size)
    }

    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
        self.deref().set_config(offset, buf)
    }

    fn update_memory(&self, mem: GM<B>) -> Result<()> {
        self.deref().update_memory(mem)
    }

    fn set_slave_req_fd(&self, slave: Slave) {
        self.deref().set_slave_req_fd(slave)
    }

    fn queues_per_thread(&self) -> Vec<u64> {
        self.deref().queues_per_thread()
    }

    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
        self.deref().exit_event(thread_index)
    }

    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[V],
        thread_id: usize,
    ) -> Result<bool> {
        self.deref()
            .handle_event(device_event, evset, vrings, thread_id)
    }
}

impl<T: VhostUserBackendMut<V, B>, V, B> VhostUserBackend<V, B> for Mutex<T>
where
    V: VringT<GM<B>>,
    B: Bitmap + 'static,
{
    fn num_queues(&self) -> usize {
        self.lock().unwrap().num_queues()
    }

    fn max_queue_size(&self) -> usize {
        self.lock().unwrap().max_queue_size()
    }

    fn features(&self) -> u64 {
        self.lock().unwrap().features()
    }

    fn acked_features(&self, features: u64) {
        self.lock().unwrap().acked_features(features)
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        self.lock().unwrap().protocol_features()
    }

    fn set_event_idx(&self, enabled: bool) {
        self.lock().unwrap().set_event_idx(enabled)
    }

    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
        self.lock().unwrap().get_config(offset, size)
    }

    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
        self.lock().unwrap().set_config(offset, buf)
    }

    fn update_memory(&self, mem: GM<B>) -> Result<()> {
        self.lock().unwrap().update_memory(mem)
    }

    fn set_slave_req_fd(&self, slave: Slave) {
        self.lock().unwrap().set_slave_req_fd(slave)
    }

    fn queues_per_thread(&self) -> Vec<u64> {
        self.lock().unwrap().queues_per_thread()
    }

    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
        self.lock().unwrap().exit_event(thread_index)
    }

    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[V],
        thread_id: usize,
    ) -> Result<bool> {
        self.lock()
            .unwrap()
            .handle_event(device_event, evset, vrings, thread_id)
    }
}

impl<T: VhostUserBackendMut<V, B>, V, B> VhostUserBackend<V, B> for RwLock<T>
where
    V: VringT<GM<B>>,
    B: Bitmap + 'static,
{
    fn num_queues(&self) -> usize {
        self.read().unwrap().num_queues()
    }

    fn max_queue_size(&self) -> usize {
        self.read().unwrap().max_queue_size()
    }

    fn features(&self) -> u64 {
        self.read().unwrap().features()
    }

    fn acked_features(&self, features: u64) {
        self.write().unwrap().acked_features(features)
    }

    fn protocol_features(&self) -> VhostUserProtocolFeatures {
        self.read().unwrap().protocol_features()
    }

    fn set_event_idx(&self, enabled: bool) {
        self.write().unwrap().set_event_idx(enabled)
    }

    fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
        self.read().unwrap().get_config(offset, size)
    }

    fn set_config(&self, offset: u32, buf: &[u8]) -> Result<()> {
        self.write().unwrap().set_config(offset, buf)
    }

    fn update_memory(&self, mem: GM<B>) -> Result<()> {
        self.write().unwrap().update_memory(mem)
    }

    fn set_slave_req_fd(&self, slave: Slave) {
        self.write().unwrap().set_slave_req_fd(slave)
    }

    fn queues_per_thread(&self) -> Vec<u64> {
        self.read().unwrap().queues_per_thread()
    }

    fn exit_event(&self, thread_index: usize) -> Option<EventFd> {
        self.read().unwrap().exit_event(thread_index)
    }

    fn handle_event(
        &self,
        device_event: u16,
        evset: EventSet,
        vrings: &[V],
        thread_id: usize,
    ) -> Result<bool> {
        self.write()
            .unwrap()
            .handle_event(device_event, evset, vrings, thread_id)
    }
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::VringRwLock;
    use std::sync::Mutex;
    use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};

    pub struct MockVhostBackend {
        events: u64,
        event_idx: bool,
        acked_features: u64,
    }

    impl MockVhostBackend {
        pub fn new() -> Self {
            MockVhostBackend {
                events: 0,
                event_idx: false,
                acked_features: 0,
            }
        }
    }

    impl VhostUserBackendMut<VringRwLock, ()> for MockVhostBackend {
        fn num_queues(&self) -> usize {
            2
        }

        fn max_queue_size(&self) -> usize {
            256
        }

        fn features(&self) -> u64 {
            0xffff_ffff_ffff_ffff
        }

        fn acked_features(&mut self, features: u64) {
            self.acked_features = features;
        }

        fn protocol_features(&self) -> VhostUserProtocolFeatures {
            VhostUserProtocolFeatures::all()
        }

        fn set_event_idx(&mut self, enabled: bool) {
            self.event_idx = enabled;
        }

        fn get_config(&self, offset: u32, size: u32) -> Vec<u8> {
            assert_eq!(offset, 0x200);
            assert_eq!(size, 8);

            vec![0xa5u8; 8]
        }

        fn set_config(&mut self, offset: u32, buf: &[u8]) -> Result<()> {
            assert_eq!(offset, 0x200);
            assert_eq!(buf.len(), 8);
            assert_eq!(buf, &[0xa5u8; 8]);

            Ok(())
        }

        fn update_memory(&mut self, _atomic_mem: GuestMemoryAtomic<GuestMemoryMmap>) -> Result<()> {
            Ok(())
        }

        fn set_slave_req_fd(&mut self, _slave: Slave) {}

        fn queues_per_thread(&self) -> Vec<u64> {
            vec![1, 1]
        }

        fn exit_event(&self, _thread_index: usize) -> Option<EventFd> {
            let event_fd = EventFd::new(0).unwrap();

            Some(event_fd)
        }

        fn handle_event(
            &mut self,
            _device_event: u16,
            _evset: EventSet,
            _vrings: &[VringRwLock],
            _thread_id: usize,
        ) -> Result<bool> {
            self.events += 1;

            Ok(false)
        }
    }

    #[test]
    fn test_new_mock_backend_mutex() {
        let backend = Arc::new(Mutex::new(MockVhostBackend::new()));

        assert_eq!(backend.num_queues(), 2);
        assert_eq!(backend.max_queue_size(), 256);
        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
        assert_eq!(
            backend.protocol_features(),
            VhostUserProtocolFeatures::all()
        );
        assert_eq!(backend.queues_per_thread(), [1, 1]);

        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
        backend.set_config(0x200, &[0xa5; 8]).unwrap();

        backend.acked_features(0xffff);
        assert_eq!(backend.lock().unwrap().acked_features, 0xffff);

        backend.set_event_idx(true);
        assert!(backend.lock().unwrap().event_idx);

        let _ = backend.exit_event(0).unwrap();

        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        backend.update_memory(mem).unwrap();
    }

    #[test]
    fn test_new_mock_backend_rwlock() {
        let backend = Arc::new(RwLock::new(MockVhostBackend::new()));

        assert_eq!(backend.num_queues(), 2);
        assert_eq!(backend.max_queue_size(), 256);
        assert_eq!(backend.features(), 0xffff_ffff_ffff_ffff);
        assert_eq!(
            backend.protocol_features(),
            VhostUserProtocolFeatures::all()
        );
        assert_eq!(backend.queues_per_thread(), [1, 1]);

        assert_eq!(backend.get_config(0x200, 8), vec![0xa5; 8]);
        backend.set_config(0x200, &[0xa5; 8]).unwrap();

        backend.acked_features(0xffff);
        assert_eq!(backend.read().unwrap().acked_features, 0xffff);

        backend.set_event_idx(true);
        assert!(backend.read().unwrap().event_idx);

        let _ = backend.exit_event(0).unwrap();

        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        backend.update_memory(mem.clone()).unwrap();

        let vring = VringRwLock::new(mem, 0x1000).unwrap();
        backend
            .handle_event(0x1, EventSet::IN, &[vring], 0)
            .unwrap();
    }
}