// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2021 Alibaba Cloud Computing. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

//! Structs to maintain state information and manipulate vhost-user queues.

use std::fs::File;
use std::io;
use std::ops::{Deref, DerefMut};
use std::os::unix::io::{FromRawFd, IntoRawFd};
use std::result::Result;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, RwLockWriteGuard};

use virtio_queue::{Error as VirtQueError, Queue, QueueT};
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryAtomic, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;

/// Trait for objects returned by `VringT::get_ref()`.
pub trait VringStateGuard<'a, M: GuestAddressSpace> {
    /// Type for guard returned by `VringT::get_ref()`.
    type G: Deref<Target = VringState<M>>;
}

/// Trait for objects returned by `VringT::get_mut()`.
pub trait VringStateMutGuard<'a, M: GuestAddressSpace> {
    /// Type for guard returned by `VringT::get_mut()`.
    type G: DerefMut<Target = VringState<M>>;
}
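
/// Trait for vring objects to access and manipulate a vhost-user queue.
///
/// A minimal setup sketch (marked `no_run`; it assumes these types are
/// re-exported at the crate root, and the queue addresses are examples that
/// must fall inside guest memory mapped by the master):
///
/// ```no_run
/// use vhost_user_backend::{VringMutex, VringT};
/// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
///
/// let mem = GuestMemoryAtomic::new(
///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
/// );
/// // One vring backed by a `Mutex`; 256 is the maximum queue size.
/// let vring = VringMutex::new(mem, 256).unwrap();
/// vring.set_queue_info(0x100100, 0x100200, 0x100300).unwrap();
/// vring.set_queue_size(256);
/// vring.set_queue_ready(true);
/// vring.set_enabled(true);
/// ```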
pub trait VringT<M: GuestAddressSpace>:
    for<'a> VringStateGuard<'a, M> + for<'a> VringStateMutGuard<'a, M>
{
    /// Create a new instance of Vring.
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError>
    where
        Self: Sized;

    /// Get an immutable guard to the underlying `VringState` object.
    fn get_ref(&self) -> <Self as VringStateGuard<M>>::G;

    /// Get a mutable guard to the underlying `VringState` object.
    fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G;

    /// Add a used descriptor into the used queue.
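    ///
    /// A completion-path sketch (`ignore`d here; `head` and `len` are
    /// hypothetical values produced while processing a descriptor chain):
    ///
    /// ```ignore
    /// // After handling the chain with head index `head` and writing `len`
    /// // bytes back to the guest:
    /// vring.add_used(head, len)?;
    /// if vring.needs_notification()? {
    ///     vring.signal_used_queue()?;
    /// }
    /// ```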
    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError>;

    /// Notify the vhost-user master that used descriptors have been put into the used queue.
    fn signal_used_queue(&self) -> io::Result<()>;

    /// Enable event notification for queue.
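    ///
    /// Returns `true` if descriptors became available on the ring while
    /// notifications were disabled, in which case the caller should process
    /// the queue again before waiting. A processing-loop sketch (`ignore`d;
    /// `process_queue` is a hypothetical backend-specific handler):
    ///
    /// ```ignore
    /// loop {
    ///     vring.disable_notification()?;
    ///     process_queue(&vring)?;
    ///     if !vring.enable_notification()? {
    ///         break;
    ///     }
    /// }
    /// ```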
    fn enable_notification(&self) -> Result<bool, VirtQueError>;

    /// Disable event notification for queue.
    fn disable_notification(&self) -> Result<(), VirtQueError>;

    /// Check whether a notification to the guest is needed.
    fn needs_notification(&self) -> Result<bool, VirtQueError>;

    /// Set vring enabled state.
    fn set_enabled(&self, enabled: bool);

    /// Set queue addresses for descriptor table, available ring and used ring.
    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError>;

    /// Get queue next avail head.
    fn queue_next_avail(&self) -> u16;

    /// Set queue next avail head.
    fn set_queue_next_avail(&self, base: u16);

    /// Set queue next used head.
    fn set_queue_next_used(&self, idx: u16);

    /// Get queue next used head index from the guest memory.
    fn queue_used_idx(&self) -> Result<u16, VirtQueError>;

    /// Set configured queue size.
    fn set_queue_size(&self, num: u16);

    /// Enable/disable queue event index feature.
    fn set_queue_event_idx(&self, enabled: bool);

    /// Set queue ready state.
    fn set_queue_ready(&self, ready: bool);

    /// Set `EventFd` for kick.
    fn set_kick(&self, file: Option<File>);

    /// Read event from the kick `EventFd`.
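    ///
    /// Returns whether the vring is enabled. A sketch of the event-loop side
    /// (`ignore`d; how the kick fd is polled is up to the backend):
    ///
    /// ```ignore
    /// // Called once the kick fd becomes readable:
    /// if vring.read_kick()? {
    ///     // The vring is enabled: drain and process available descriptors.
    /// }
    /// ```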
    fn read_kick(&self) -> io::Result<bool>;

    /// Set `EventFd` for call.
    fn set_call(&self, file: Option<File>);

    /// Set `EventFd` for err.
    fn set_err(&self, file: Option<File>);
}

/// Struct to maintain raw state information for a vhost-user queue.
///
/// This struct maintains all information of a virtio queue, and can be used as a `VringT`
/// object for single-threaded contexts.
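///
/// Construction is internal; callers reach a `VringState` through the guards
/// returned by `VringT::get_ref()`/`VringT::get_mut()`. A sketch (marked
/// `no_run`):
///
/// ```no_run
/// use vhost_user_backend::{VringRwLock, VringT};
/// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
///
/// let mem = GuestMemoryAtomic::new(
///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
/// );
/// let vring = VringRwLock::new(mem, 256).unwrap();
/// // The guard returned by `get_mut()` dereferences to `VringState`.
/// let mut state = vring.get_mut();
/// let _queue = state.get_queue_mut();
/// ```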
pub struct VringState<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    queue: Queue,
    kick: Option<EventFd>,
    call: Option<EventFd>,
    err: Option<EventFd>,
    enabled: bool,
    mem: M,
}

impl<M: GuestAddressSpace> VringState<M> {
    /// Create a new instance of Vring.
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringState {
            queue: Queue::new(max_queue_size)?,
            kick: None,
            call: None,
            err: None,
            enabled: false,
            mem,
        })
    }

    /// Get an immutable reference to the underlying raw `Queue` object.
    pub fn get_queue(&self) -> &Queue {
        &self.queue
    }

    /// Get a mutable reference to the underlying raw `Queue` object.
    pub fn get_queue_mut(&mut self) -> &mut Queue {
        &mut self.queue
    }

    /// Add a used descriptor into the used queue.
    pub fn add_used(&mut self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.queue
            .add_used(self.mem.memory().deref(), desc_index, len)
    }

    /// Notify the vhost-user master that used descriptors have been put into the used queue.
    pub fn signal_used_queue(&self) -> io::Result<()> {
        if let Some(call) = self.call.as_ref() {
            call.write(1)
        } else {
            Ok(())
        }
    }

    /// Enable event notification for queue.
    pub fn enable_notification(&mut self) -> Result<bool, VirtQueError> {
        self.queue.enable_notification(self.mem.memory().deref())
    }

    /// Disable event notification for queue.
    pub fn disable_notification(&mut self) -> Result<(), VirtQueError> {
        self.queue.disable_notification(self.mem.memory().deref())
    }

    /// Check whether a notification to the guest is needed.
    pub fn needs_notification(&mut self) -> Result<bool, VirtQueError> {
        self.queue.needs_notification(self.mem.memory().deref())
    }

    /// Set vring enabled state.
    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }

    /// Set queue addresses for descriptor table, available ring and used ring.
    pub fn set_queue_info(
        &mut self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.queue
            .try_set_desc_table_address(GuestAddress(desc_table))?;
        self.queue
            .try_set_avail_ring_address(GuestAddress(avail_ring))?;
        self.queue
            .try_set_used_ring_address(GuestAddress(used_ring))
    }

    /// Get queue next avail head.
    fn queue_next_avail(&self) -> u16 {
        self.queue.next_avail()
    }

    /// Set queue next avail head.
    fn set_queue_next_avail(&mut self, base: u16) {
        self.queue.set_next_avail(base);
    }

    /// Set queue next used head.
    fn set_queue_next_used(&mut self, idx: u16) {
        self.queue.set_next_used(idx);
    }

    /// Get queue next used head index from the guest memory.
    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.queue
            .used_idx(self.mem.memory().deref(), Ordering::Relaxed)
            .map(|idx| idx.0)
    }

    /// Set configured queue size.
    fn set_queue_size(&mut self, num: u16) {
        self.queue.set_size(num);
    }

    /// Enable/disable queue event index feature.
    fn set_queue_event_idx(&mut self, enabled: bool) {
        self.queue.set_event_idx(enabled);
    }

    /// Set queue ready state.
    fn set_queue_ready(&mut self, ready: bool) {
        self.queue.set_ready(ready);
    }

    /// Get the `EventFd` for kick.
    pub fn get_kick(&self) -> &Option<EventFd> {
        &self.kick
    }

    /// Set `EventFd` for kick.
    fn set_kick(&mut self, file: Option<File>) {
        // SAFETY:
        // EventFd requires that it has sole ownership of its fd. So does File, so this is safe.
        // Ideally, we'd have a generic way to refer to a uniquely-owned fd, such as that proposed
        // by Rust RFC #3128.
        self.kick = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }

    /// Read event from the kick `EventFd`.
    fn read_kick(&self) -> io::Result<bool> {
        if let Some(kick) = &self.kick {
            kick.read()?;
        }

        Ok(self.enabled)
    }

    /// Set `EventFd` for call.
    fn set_call(&mut self, file: Option<File>) {
        // SAFETY: see comment in set_kick()
        self.call = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }

    /// Get the `EventFd` for call.
    pub fn get_call(&self) -> &Option<EventFd> {
        &self.call
    }

    /// Set `EventFd` for err.
    fn set_err(&mut self, file: Option<File>) {
        // SAFETY: see comment in set_kick()
        self.err = file.map(|f| unsafe { EventFd::from_raw_fd(f.into_raw_fd()) });
    }
}

/// A `VringState` object protected by a `Mutex`, for use in multi-threaded
/// contexts.
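///
/// A minimal sharing sketch (marked `no_run`; guest-memory setup as in the
/// `VringT` example):
///
/// ```no_run
/// use vhost_user_backend::{VringMutex, VringT};
/// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
///
/// let mem = GuestMemoryAtomic::new(
///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
/// );
/// let vring = VringMutex::new(mem, 256).unwrap();
/// // Clones are cheap: they share the same `Arc<Mutex<VringState>>`.
/// let worker = vring.clone();
/// std::thread::spawn(move || {
///     worker.signal_used_queue().unwrap();
/// })
/// .join()
/// .unwrap();
/// ```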
#[derive(Clone)]
pub struct VringMutex<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    state: Arc<Mutex<VringState<M>>>,
}

impl<M: GuestAddressSpace> VringMutex<M> {
    /// Get a mutable guard to the underlying raw `VringState` object.
    fn lock(&self) -> MutexGuard<VringState<M>> {
        self.state.lock().unwrap()
    }
}

impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringMutex<M> {
    type G = MutexGuard<'a, VringState<M>>;
}

impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringMutex<M> {
    type G = MutexGuard<'a, VringState<M>>;
}

impl<M: 'static + GuestAddressSpace> VringT<M> for VringMutex<M> {
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringMutex {
            state: Arc::new(Mutex::new(VringState::new(mem, max_queue_size)?)),
        })
    }

    fn get_ref(&self) -> <Self as VringStateGuard<M>>::G {
        self.state.lock().unwrap()
    }

    fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G {
        self.lock()
    }

    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.lock().add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&self) -> Result<bool, VirtQueError> {
        self.lock().enable_notification()
    }

    fn disable_notification(&self) -> Result<(), VirtQueError> {
        self.lock().disable_notification()
    }

    fn needs_notification(&self) -> Result<bool, VirtQueError> {
        self.lock().needs_notification()
    }

    fn set_enabled(&self, enabled: bool) {
        self.lock().set_enabled(enabled)
    }

    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.lock()
            .set_queue_info(desc_table, avail_ring, used_ring)
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&self, base: u16) {
        self.lock().set_queue_next_avail(base)
    }

    fn set_queue_next_used(&self, idx: u16) {
        self.lock().set_queue_next_used(idx)
    }

    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.lock().queue_used_idx()
    }

    fn set_queue_size(&self, num: u16) {
        self.lock().set_queue_size(num);
    }

    fn set_queue_event_idx(&self, enabled: bool) {
        self.lock().set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&self, ready: bool) {
        self.lock().set_queue_ready(ready);
    }

    fn set_kick(&self, file: Option<File>) {
        self.lock().set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&self, file: Option<File>) {
        self.lock().set_call(file)
    }

    fn set_err(&self, file: Option<File>) {
        self.lock().set_err(file)
    }
}

/// A `VringState` object protected by a `RwLock`, for use in multi-threaded
/// contexts.
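///
/// Read-only accessors (`get_ref()`, and the trait methods routed through it
/// such as `queue_next_avail()` and `read_kick()`) take a shared read lock,
/// so they can run concurrently. A sketch (marked `no_run`):
///
/// ```no_run
/// use vhost_user_backend::{VringRwLock, VringT};
/// use vm_memory::{GuestAddress, GuestMemoryAtomic, GuestMemoryMmap};
///
/// let mem = GuestMemoryAtomic::new(
///     GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
/// );
/// let vring = VringRwLock::new(mem, 256).unwrap();
/// // Multiple read guards may be held at the same time.
/// let r1 = vring.get_ref();
/// let r2 = vring.get_ref();
/// assert!(r1.get_kick().is_none() && r2.get_kick().is_none());
/// ```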
#[derive(Clone)]
pub struct VringRwLock<M: GuestAddressSpace = GuestMemoryAtomic<GuestMemoryMmap>> {
    state: Arc<RwLock<VringState<M>>>,
}

impl<M: GuestAddressSpace> VringRwLock<M> {
    /// Get a mutable guard to the underlying raw `VringState` object.
    fn write_lock(&self) -> RwLockWriteGuard<VringState<M>> {
        self.state.write().unwrap()
    }
}

impl<'a, M: 'a + GuestAddressSpace> VringStateGuard<'a, M> for VringRwLock<M> {
    type G = RwLockReadGuard<'a, VringState<M>>;
}

impl<'a, M: 'a + GuestAddressSpace> VringStateMutGuard<'a, M> for VringRwLock<M> {
    type G = RwLockWriteGuard<'a, VringState<M>>;
}

impl<M: 'static + GuestAddressSpace> VringT<M> for VringRwLock<M> {
    fn new(mem: M, max_queue_size: u16) -> Result<Self, VirtQueError> {
        Ok(VringRwLock {
            state: Arc::new(RwLock::new(VringState::new(mem, max_queue_size)?)),
        })
    }

    fn get_ref(&self) -> <Self as VringStateGuard<M>>::G {
        self.state.read().unwrap()
    }

    fn get_mut(&self) -> <Self as VringStateMutGuard<M>>::G {
        self.write_lock()
    }

    fn add_used(&self, desc_index: u16, len: u32) -> Result<(), VirtQueError> {
        self.write_lock().add_used(desc_index, len)
    }

    fn signal_used_queue(&self) -> io::Result<()> {
        self.get_ref().signal_used_queue()
    }

    fn enable_notification(&self) -> Result<bool, VirtQueError> {
        self.write_lock().enable_notification()
    }

    fn disable_notification(&self) -> Result<(), VirtQueError> {
        self.write_lock().disable_notification()
    }

    fn needs_notification(&self) -> Result<bool, VirtQueError> {
        self.write_lock().needs_notification()
    }

    fn set_enabled(&self, enabled: bool) {
        self.write_lock().set_enabled(enabled)
    }

    fn set_queue_info(
        &self,
        desc_table: u64,
        avail_ring: u64,
        used_ring: u64,
    ) -> Result<(), VirtQueError> {
        self.write_lock()
            .set_queue_info(desc_table, avail_ring, used_ring)
    }

    fn queue_next_avail(&self) -> u16 {
        self.get_ref().queue_next_avail()
    }

    fn set_queue_next_avail(&self, base: u16) {
        self.write_lock().set_queue_next_avail(base)
    }

    fn set_queue_next_used(&self, idx: u16) {
        self.write_lock().set_queue_next_used(idx)
    }

    fn queue_used_idx(&self) -> Result<u16, VirtQueError> {
        self.get_ref().queue_used_idx()
    }

    fn set_queue_size(&self, num: u16) {
        self.write_lock().set_queue_size(num);
    }

    fn set_queue_event_idx(&self, enabled: bool) {
        self.write_lock().set_queue_event_idx(enabled);
    }

    fn set_queue_ready(&self, ready: bool) {
        self.write_lock().set_queue_ready(ready);
    }

    fn set_kick(&self, file: Option<File>) {
        self.write_lock().set_kick(file);
    }

    fn read_kick(&self) -> io::Result<bool> {
        self.get_ref().read_kick()
    }

    fn set_call(&self, file: Option<File>) {
        self.write_lock().set_call(file)
    }

    fn set_err(&self, file: Option<File>) {
        self.write_lock().set_err(file)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::os::unix::io::AsRawFd;
    use vm_memory::bitmap::AtomicBitmap;
    use vmm_sys_util::eventfd::EventFd;

    #[test]
    fn test_new_vring() {
        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0x100000), 0x10000)])
                .unwrap(),
        );
        let vring = VringMutex::new(mem, 0x1000).unwrap();

        assert!(vring.get_ref().get_kick().is_none());
        assert!(!vring.get_mut().enabled);
        assert!(!vring.lock().queue.ready());
        assert!(!vring.lock().queue.event_idx_enabled());

        vring.set_enabled(true);
        assert!(vring.get_ref().enabled);

        vring.set_queue_info(0x100100, 0x100200, 0x100300).unwrap();
        assert_eq!(vring.lock().get_queue().desc_table(), 0x100100);
        assert_eq!(vring.lock().get_queue().avail_ring(), 0x100200);
        assert_eq!(vring.lock().get_queue().used_ring(), 0x100300);

        assert_eq!(vring.queue_next_avail(), 0);
        vring.set_queue_next_avail(0x20);
        assert_eq!(vring.queue_next_avail(), 0x20);

        vring.set_queue_size(0x200);
        assert_eq!(vring.lock().queue.size(), 0x200);

        vring.set_queue_event_idx(true);
        assert!(vring.lock().queue.event_idx_enabled());

        vring.set_queue_ready(true);
        assert!(vring.lock().queue.ready());
    }

    #[test]
    fn test_vring_set_fd() {
        let mem = GuestMemoryAtomic::new(
            GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x100000), 0x10000)]).unwrap(),
        );
        let vring = VringMutex::new(mem, 0x1000).unwrap();

        vring.set_enabled(true);
        assert!(vring.get_ref().enabled);

        let eventfd = EventFd::new(0).unwrap();
        // SAFETY: `eventfd` holds a valid fd (the `unwrap()` above would have
        // panicked otherwise), and the `mem::forget()` below keeps the fd from
        // being closed twice.
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_mut().kick.is_none());
        assert!(vring.read_kick().unwrap());
        vring.set_kick(Some(file));
        eventfd.write(1).unwrap();
        assert!(vring.read_kick().unwrap());
        assert!(vring.get_ref().kick.is_some());
        vring.set_kick(None);
        assert!(vring.get_ref().kick.is_none());
        std::mem::forget(eventfd);

        let eventfd = EventFd::new(0).unwrap();
        // SAFETY: see the comment on the kick fd above.
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_ref().call.is_none());
        vring.set_call(Some(file));
        assert!(vring.get_ref().call.is_some());
        vring.set_call(None);
        assert!(vring.get_ref().call.is_none());
        std::mem::forget(eventfd);

        let eventfd = EventFd::new(0).unwrap();
        // SAFETY: see the comment on the kick fd above.
        let file = unsafe { File::from_raw_fd(eventfd.as_raw_fd()) };
        assert!(vring.get_ref().err.is_none());
        vring.set_err(Some(file));
        assert!(vring.get_ref().err.is_some());
        vring.set_err(None);
        assert!(vring.get_ref().err.is_none());
        std::mem::forget(eventfd);
    }
}