// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Traits and structs for the vhost-user master.
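//!
//! The sketch below shows one plausible way to drive a [`Master`]; it is an
//! illustration only. The socket path is hypothetical, a vhost-user backend is
//! assumed to already be listening on it, and the import paths assume the
//! crate's usual re-exports.
//!
//! ```ignore
//! use vhost::vhost_user::Master;
//! use vhost::VhostBackend;
//!
//! // Connect to a backend that exposes up to 2 queues.
//! let master = Master::connect("/tmp/vhost-user-backend.sock", 2)?;
//!
//! // Basic vhost handshake: claim ownership and negotiate virtio features.
//! master.set_owner()?;
//! let features = master.get_features()?;
//! master.set_features(features)?;
//! ```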

use std::fs::File;
use std::mem;
use std::os::unix::io::{AsRawFd, RawFd};
use std::os::unix::net::UnixStream;
use std::path::Path;
use std::sync::{Arc, Mutex, MutexGuard};

use vm_memory::ByteValued;
use vmm_sys_util::eventfd::EventFd;

use super::connection::Endpoint;
use super::message::*;
use super::{take_single_file, Error as VhostUserError, Result as VhostUserResult};
use crate::backend::{
    VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use crate::{Error, Result};

/// Trait for the vhost-user master to provide extra methods not yet covered by `VhostBackend`.
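///
/// A rough negotiation sequence using these extra methods is sketched below; it
/// is illustrative only and assumes `master` is a connected `Master` whose
/// virtio `PROTOCOL_FEATURES` bit was already acknowledged via `set_features()`,
/// with this trait in scope.
///
/// ```ignore
/// // Query and acknowledge the vhost-user protocol features.
/// let proto = master.get_protocol_features()?;
/// master.set_protocol_features(proto)?;
///
/// // With the MQ protocol feature acked, ask how many queues are supported.
/// let queues = master.get_queue_num()?;
/// ```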
pub trait VhostUserMaster: VhostBackend {
    /// Get the protocol feature bitmask from the underlying vhost implementation.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures>;

    /// Enable protocol features in the underlying vhost implementation.
    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()>;

    /// Query how many queues the backend supports.
    fn get_queue_num(&mut self) -> Result<u64>;

    /// Signal the slave to enable or disable the corresponding vring.
    ///
    /// The slave must not pass data to/from the backend until the ring is enabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been
    /// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;

    /// Fetch the contents of the virtio device configuration space.
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)>;

    /// Change the virtio device configuration space. It can also be used for live migration on
    /// the destination host to set read-only configuration space fields.
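    ///
    /// A minimal usage sketch, assuming the CONFIG protocol feature has already
    /// been negotiated and `new_cfg` (hypothetical) holds the bytes to write:
    ///
    /// ```ignore
    /// // Write 4 bytes at offset 0 of the device config space.
    /// let new_cfg = [0u8; 4];
    /// master.set_config(0, VhostUserConfigFlags::WRITABLE, &new_cfg)?;
    /// ```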
    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>;

    /// Set up the slave communication channel.
    fn set_slave_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()>;

    /// Retrieve the shared buffer for inflight I/O tracking.
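    ///
    /// A sketch of the INFLIGHT_SHMFD flow, assuming that protocol feature has
    /// been negotiated and that `VhostUserInflight` implements `Default` (an
    /// assumption of this example):
    ///
    /// ```ignore
    /// let mut req = VhostUserInflight::default();
    /// req.num_queues = 2;
    /// req.queue_size = 256;
    /// // The slave replies with the actual layout plus the backing shared file.
    /// let (info, file) = master.get_inflight_fd(&req)?;
    /// ```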
    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)>;

    /// Set the shared buffer for inflight I/O tracking.
    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawFd) -> Result<()>;

    /// Query the maximum number of memory slots supported by the backend.
    fn get_max_mem_slots(&mut self) -> Result<u64>;

    /// Add a new guest memory mapping for vhost to use.
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;

    /// Remove a guest memory mapping from vhost.
    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
}

fn error_code<T>(err: VhostUserError) -> Result<T> {
    Err(Error::VhostUserProtocol(err))
}

/// Struct for the vhost-user master endpoint.
#[derive(Clone)]
pub struct Master {
    node: Arc<Mutex<MasterInternal>>,
}

impl Master {
    /// Create a new instance.
    fn new(ep: Endpoint<MasterReq>, max_queue_num: u64) -> Self {
        Master {
            node: Arc::new(Mutex::new(MasterInternal {
                main_sock: ep,
                virtio_features: 0,
                acked_virtio_features: 0,
                protocol_features: 0,
                acked_protocol_features: 0,
                protocol_features_ready: false,
                max_queue_num,
                error: None,
                hdr_flags: VhostUserHeaderFlag::empty(),
            })),
        }
    }

    fn node(&self) -> MutexGuard<MasterInternal> {
        self.node.lock().unwrap()
    }

    /// Create a new instance from a Unix stream socket.
    pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
        Self::new(Endpoint::<MasterReq>::from_stream(sock), max_queue_num)
    }

    /// Create a new vhost-user master endpoint.
    ///
    /// Will retry as the backend may not be ready to accept the connection.
    ///
    /// # Arguments
    /// * `path` - path of Unix domain socket listener to connect to
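    ///
    /// A hedged usage sketch; the socket path is hypothetical and a backend is
    /// assumed to already be listening there (otherwise `connect()` retries a
    /// few times and then fails):
    ///
    /// ```ignore
    /// let master = Master::connect("/tmp/vhost-user-backend.sock", 2)?;
    /// ```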
    pub fn connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self> {
        let mut retry_count = 5;
        let endpoint = loop {
            match Endpoint::<MasterReq>::connect(&path) {
                Ok(endpoint) => break Ok(endpoint),
                Err(e) => match &e {
                    VhostUserError::SocketConnect(why) => {
                        if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 {
                            std::thread::sleep(std::time::Duration::from_millis(100));
                            retry_count -= 1;
                            continue;
                        } else {
                            break Err(e);
                        }
                    }
                    _ => break Err(e),
                },
            }
        }?;

        Ok(Self::new(endpoint, max_queue_num))
    }

    /// Set the header flags that should be applied to all following messages.
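    ///
    /// For instance, a master that has negotiated the REPLY_ACK protocol feature
    /// might request explicit acknowledgements for subsequent messages; this is
    /// only a sketch and assumes `VhostUserHeaderFlag::NEED_REPLY` is the
    /// relevant flag:
    ///
    /// ```ignore
    /// master.set_hdr_flags(VhostUserHeaderFlag::NEED_REPLY);
    /// ```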
    pub fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
        let mut node = self.node();
        node.hdr_flags = flags;
    }
}

impl VhostBackend for Master {
    /// Get the feature bitmask from the underlying vhost implementation.
    fn get_features(&self) -> Result<u64> {
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.virtio_features = val.value;
        Ok(node.virtio_features)
    }

    /// Enable features in the underlying vhost implementation using a bitmask.
    fn set_features(&self, features: u64) -> Result<()> {
        let mut node = self.node();
        let val = VhostUserU64::new(features);
        let hdr = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
        node.acked_virtio_features = features & node.virtio_features;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the current Master as an owner of the session.
    fn set_owner(&self) -> Result<()> {
        // We unwrap() the return value to assert that we are not expecting threads to ever fail
        // while holding the lock.
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::SET_OWNER, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn reset_owner(&self) -> Result<()> {
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::RESET_OWNER, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the memory map regions on the slave so it can translate the vring
    /// addresses. In the ancillary data there is an array of file descriptors,
    /// one for each memory mapped region.
    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
        if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
            return error_code(VhostUserError::InvalidParam);
        }

        let mut ctx = VhostUserMemoryContext::new();
        for region in regions.iter() {
            if region.memory_size == 0 || region.mmap_handle < 0 {
                return error_code(VhostUserError::InvalidParam);
            }

            ctx.append(&region.to_region(), region.mmap_handle);
        }

        let mut node = self.node();
        let body = VhostUserMemory::new(ctx.regions.len() as u32);
        // SAFETY: Safe because ctx.regions is a valid Vec at this point.
        let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
        let hdr = node.send_request_with_payload(
            MasterReq::SET_MEM_TABLE,
            &body,
            payload,
            Some(ctx.fds.as_slice()),
        )?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    // Clippy doesn't seem to know that `if let` combined with `&&` is still experimental.
    #[allow(clippy::unnecessary_unwrap)]
    fn set_log_base(&self, base: u64, region: Option<VhostUserDirtyLogRegion>) -> Result<()> {
        let mut node = self.node();
        let val = VhostUserU64::new(base);

        if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
            && region.is_some()
        {
            let region = region.unwrap();
            let log = VhostUserLog {
                mmap_size: region.mmap_size,
                mmap_offset: region.mmap_offset,
            };
            let hdr = node.send_request_with_body(
                MasterReq::SET_LOG_BASE,
                &log,
                Some(&[region.mmap_handle]),
            )?;
            node.wait_for_ack(&hdr).map_err(|e| e.into())
        } else {
            let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, None)?;
            Ok(())
        }
    }

    fn set_log_fd(&self, fd: RawFd) -> Result<()> {
        let mut node = self.node();
        let fds = [fd];
        let hdr = node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the size of the queue.
    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringState::new(queue_index as u32, num.into());
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the addresses of the different aspects of the vring.
    fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num
            || config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
        {
            return error_code(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the base offset in the available vring.
    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringState::new(queue_index as u32, base.into());
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }

        let req = VhostUserVringState::new(queue_index as u32, 0);
        let hdr = node.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
        let reply = node.recv_reply::<VhostUserVringState>(&hdr)?;
        Ok(reply.num)
    }

    /// Set the event file descriptor to signal when buffers are used.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// will be used instead of waiting for the call.
    fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_CALL, queue_index, fd.as_raw_fd())?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the event file descriptor for adding buffers to the vring.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data. This signals that polling
    /// should be used instead of waiting for a kick.
    fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_KICK, queue_index, fd.as_raw_fd())?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Set the event file descriptor to signal when an error occurs.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
    /// is set when there is no file descriptor in the ancillary data.
    fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(MasterReq::SET_VRING_ERR, queue_index, fd.as_raw_fd())?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }
}

impl VhostUserMaster for Master {
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
        let mut node = self.node();
        node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
        let hdr = node.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.protocol_features = val.value;
        // Should we support forward compatibility?
        // If so, just mask out unrecognized flags instead of returning errors.
        match VhostUserProtocolFeatures::from_bits(node.protocol_features) {
            Some(val) => Ok(val),
            None => error_code(VhostUserError::InvalidMessage),
        }
    }

    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
        let mut node = self.node();
        node.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;
        let val = VhostUserU64::new(features.bits());
        let hdr = node.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
        // Don't wait for ACK here because the protocol feature negotiation process hasn't been
        // completed yet.
        node.acked_protocol_features = features.bits();
        node.protocol_features_ready = true;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn get_queue_num(&mut self) -> Result<u64> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::MQ)?;

        let hdr = node.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        if val.value > VHOST_USER_MAX_VRINGS {
            return error_code(VhostUserError::InvalidMessage);
        }
        node.max_queue_num = val.value;
        Ok(node.max_queue_num)
    }

    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
        let mut node = self.node();
        // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
        if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
            return error_code(VhostUserError::InactiveFeature(
                VhostUserVirtioFeatures::PROTOCOL_FEATURES,
            ));
        } else if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }

        let flag = if enable { 1 } else { 0 };
        let val = VhostUserVringState::new(queue_index as u32, flag);
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> {
        let body = VhostUserConfig::new(offset, size, flags);
        if !body.is_valid() {
            return error_code(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;

        // vhost-user spec states that:
        // "Master payload: virtio device config space"
        // "Slave payload: virtio device config space"
        let hdr = node.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
        let (body_reply, buf_reply, rfds) =
            node.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
        if rfds.is_some() {
            return error_code(VhostUserError::InvalidMessage);
        } else if body_reply.size == 0 {
            return error_code(VhostUserError::SlaveInternalError);
        } else if body_reply.size != body.size
            || body_reply.size as usize != buf.len()
            || body_reply.offset != body.offset
        {
            return error_code(VhostUserError::InvalidMessage);
        }

        Ok((body_reply, buf_reply))
    }

    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> {
        if buf.len() > MAX_MSG_SIZE {
            return error_code(VhostUserError::InvalidParam);
        }
        let body = VhostUserConfig::new(offset, buf.len() as u32, flags);
        if !body.is_valid() {
            return error_code(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        node.check_proto_feature(VhostUserProtocolFeatures::CONFIG)?;

        let hdr = node.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn set_slave_request_fd(&mut self, fd: &dyn AsRawFd) -> Result<()> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::SLAVE_REQ)?;
        let fds = [fd.as_raw_fd()];
        let hdr = node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?;

        let hdr = node.send_request_with_body(MasterReq::GET_INFLIGHT_FD, inflight, None)?;
        let (inflight, files) = node.recv_reply_with_files::<VhostUserInflight>(&hdr)?;

        match take_single_file(files) {
            Some(file) => Ok((inflight, file)),
            None => error_code(VhostUserError::IncorrectFds),
        }
    }

    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawFd) -> Result<()> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::INFLIGHT_SHMFD)?;

        if inflight.mmap_size == 0 || inflight.num_queues == 0 || inflight.queue_size == 0 || fd < 0
        {
            return error_code(VhostUserError::InvalidParam);
        }

        let hdr = node.send_request_with_body(MasterReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn get_max_mem_slots(&mut self) -> Result<u64> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;

        let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;

        Ok(val.value)
    }

    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
        if region.memory_size == 0 || region.mmap_handle < 0 {
            return error_code(VhostUserError::InvalidParam);
        }

        let body = region.to_single_region();
        let fds = [region.mmap_handle];
        let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        node.check_proto_feature(VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS)?;
        if region.memory_size == 0 {
            return error_code(VhostUserError::InvalidParam);
        }

        let body = region.to_single_region();
        let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }
}

impl AsRawFd for Master {
    fn as_raw_fd(&self) -> RawFd {
        let node = self.node();
        node.main_sock.as_raw_fd()
    }
}

/// Context object to pass guest memory configuration to Master::set_mem_table().
struct VhostUserMemoryContext {
    regions: VhostUserMemoryPayload,
    fds: Vec<RawFd>,
}

impl VhostUserMemoryContext {
    /// Create a context object.
    pub fn new() -> Self {
        VhostUserMemoryContext {
            regions: VhostUserMemoryPayload::new(),
            fds: Vec::new(),
        }
    }

    /// Append a user memory region and corresponding RawFd into the context object.
    pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawFd) {
        self.regions.push(*region);
        self.fds.push(fd);
    }
}

struct MasterInternal {
    // Used to send requests to the slave.
    main_sock: Endpoint<MasterReq>,
    // Cached virtio features from the slave.
    virtio_features: u64,
    // Cached acked virtio features from the driver.
    acked_virtio_features: u64,
    // Cached vhost-user protocol features from the slave.
    protocol_features: u64,
    // Cached acked vhost-user protocol features.
    acked_protocol_features: u64,
    // Whether the cached vhost-user protocol features are ready to use.
    protocol_features_ready: bool,
    // Cached maximum number of queues supported by the slave.
    max_queue_num: u64,
    // Internal flag to mark failure state.
    error: Option<i32>,
    // List of header flags.
    hdr_flags: VhostUserHeaderFlag,
}

impl MasterInternal {
    fn send_request_header(
        &mut self,
        code: MasterReq,
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        self.check_state()?;
        let hdr = self.new_request_header(code, 0);
        self.main_sock.send_header(&hdr, fds)?;
        Ok(hdr)
    }

    fn send_request_with_body<T: ByteValued>(
        &mut self,
        code: MasterReq,
        msg: &T,
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if mem::size_of::<T>() > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let hdr = self.new_request_header(code, mem::size_of::<T>() as u32);
        self.main_sock.send_message(&hdr, msg, fds)?;
        Ok(hdr)
    }

    fn send_request_with_payload<T: ByteValued>(
        &mut self,
        code: MasterReq,
        msg: &T,
        payload: &[u8],
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        let len = mem::size_of::<T>() + payload.len();
        if len > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        if let Some(fd_arr) = fds {
            if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
                return Err(VhostUserError::InvalidParam);
            }
        }
        self.check_state()?;

        let hdr = self.new_request_header(code, len as u32);
        self.main_sock
            .send_message_with_payload(&hdr, msg, payload, fds)?;
        Ok(hdr)
    }

    fn send_fd_for_vring(
        &mut self,
        code: MasterReq,
        queue_index: usize,
        fd: RawFd,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if queue_index as u64 >= self.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
        // This flag is set when there is no file descriptor in the ancillary data. This signals
        // that polling will be used instead of waiting for the call.
        let msg = VhostUserU64::new(queue_index as u64);
        let hdr = self.new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
        self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?;
        Ok(hdr)
    }

    fn recv_reply<T: ByteValued + Sized + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<T> {
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let (reply, body, rfds) = self.main_sock.recv_body::<T>()?;
        if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok(body)
    }

    fn recv_reply_with_files<T: ByteValued + Sized + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<(T, Option<Vec<File>>)> {
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let (reply, body, files) = self.main_sock.recv_body::<T>()?;
        if !reply.is_reply_for(hdr) || files.is_none() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok((body, files))
    }

    fn recv_reply_with_payload<T: ByteValued + Sized + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<(T, Vec<u8>, Option<Vec<File>>)> {
        if mem::size_of::<T>() > MAX_MSG_SIZE
            || hdr.get_size() as usize <= mem::size_of::<T>()
            || hdr.get_size() as usize > MAX_MSG_SIZE
            || hdr.is_reply()
        {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let mut buf: Vec<u8> = vec![0; hdr.get_size() as usize - mem::size_of::<T>()];
        let (reply, body, bytes, files) = self.main_sock.recv_payload_into_buf::<T>(&mut buf)?;
        if !reply.is_reply_for(hdr)
            || reply.get_size() as usize != mem::size_of::<T>() + bytes
            || files.is_some()
            || !body.is_valid()
            || bytes != buf.len()
        {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body, buf, files))
    }

    fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
            || !hdr.is_need_reply()
        {
            return Ok(());
        }
        self.check_state()?;

        let (reply, body, rfds) = self.main_sock.recv_body::<VhostUserU64>()?;
        if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        if body.value != 0 {
            return Err(VhostUserError::SlaveInternalError);
        }
        Ok(())
    }

    fn check_feature(&self, feat: VhostUserVirtioFeatures) -> VhostUserResult<()> {
        if self.virtio_features & feat.bits() != 0 {
            Ok(())
        } else {
            Err(VhostUserError::InactiveFeature(feat))
        }
    }

    fn check_proto_feature(&self, feat: VhostUserProtocolFeatures) -> VhostUserResult<()> {
        if self.acked_protocol_features & feat.bits() != 0 {
            Ok(())
        } else {
            Err(VhostUserError::InactiveOperation(feat))
        }
    }

    fn check_state(&self) -> VhostUserResult<()> {
        match self.error {
            Some(e) => Err(VhostUserError::SocketBroken(
                std::io::Error::from_raw_os_error(e),
            )),
            None => Ok(()),
        }
    }

    #[inline]
    fn new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
        VhostUserMsgHeader::new(request, self.hdr_flags.bits() | 0x1, size)
    }
}

#[cfg(test)]
mod tests {
    use super::super::connection::Listener;
    use super::*;
    use vmm_sys_util::rand::rand_alphanumerics;

    use std::path::PathBuf;

    fn temp_path() -> PathBuf {
        PathBuf::from(format!(
            "/tmp/vhost_test_{}",
            rand_alphanumerics(8).to_str().unwrap()
        ))
    }

    fn create_pair<P: AsRef<Path>>(path: P) -> (Master, Endpoint<MasterReq>) {
        let listener = Listener::new(&path, true).unwrap();
        listener.set_nonblocking(true).unwrap();
        let master = Master::connect(path, 2).unwrap();
        let slave = listener.accept().unwrap().unwrap();
        (master, Endpoint::from_stream(slave))
    }

    #[test]
    fn create_master() {
        let path = temp_path();
        let listener = Listener::new(&path, true).unwrap();
        listener.set_nonblocking(true).unwrap();

        let master = Master::connect(&path, 1).unwrap();
        let mut slave = Endpoint::<MasterReq>::from_stream(listener.accept().unwrap().unwrap());

        assert!(master.as_raw_fd() > 0);
        // Send two messages continuously
        master.set_owner().unwrap();
        master.reset_owner().unwrap();

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code().unwrap(), MasterReq::RESET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());
    }

    #[test]
    fn test_create_failure() {
        let path = temp_path();
        let _ = Listener::new(&path, true).unwrap();
        let _ = Listener::new(&path, false).is_err();
        assert!(Master::connect(&path, 1).is_err());

        let listener = Listener::new(&path, true).unwrap();
        assert!(Listener::new(&path, false).is_err());
        listener.set_nonblocking(true).unwrap();

        let _master = Master::connect(&path, 1).unwrap();
        let _slave = listener.accept().unwrap().unwrap();
    }

    #[test]
    fn test_features() {
        let path = temp_path();
        let (master, mut peer) = create_pair(path);

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, 0x15u64);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        master.set_features(0x15).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, 0x15);

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = 0x15u32;
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_features().is_err());
    }

    #[test]
    fn test_protocol_features() {
        let path = temp_path();
        let (mut master, mut peer) = create_pair(path);

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_OWNER);
        assert!(rfds.is_none());

        assert!(master.get_protocol_features().is_err());
        assert!(master
            .set_protocol_features(VhostUserProtocolFeatures::all())
            .is_err());

        let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(vfeatures);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, vfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_features(vfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, vfeatures);

        let pfeatures = VhostUserProtocolFeatures::all();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_protocol_features().unwrap();
        assert_eq!(features, pfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_protocol_features(pfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, pfeatures.bits());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_protocol_features().is_err());
    }

    #[test]
    fn test_master_set_config_negative() {
        let path = temp_path();
        let (mut master, _peer) = create_pair(path);
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        master
            .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();

        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        master
            .set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap();
        master
            .set_config(
                VHOST_USER_CONFIG_SIZE,
                VhostUserConfigFlags::WRITABLE,
                &buf[0..4],
            )
            .unwrap_err();
        master
            .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();
        master
            .set_config(
                0x100,
                // SAFETY: This is a negative test, so we are setting unexpected flags.
                unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
                &buf[0..4],
            )
            .unwrap_err();
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
            .unwrap_err();
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
            .unwrap_err();
    }

    fn create_pair2() -> (Master, Endpoint<MasterReq>) {
        let path = temp_path();
        let (master, peer) = create_pair(path);

        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        (master, peer)
    }

    #[test]
    fn test_master_get_config_negative0() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_code(MasterReq::GET_FEATURES);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
        hdr.set_code(MasterReq::GET_CONFIG);
    }

    #[test]
    fn test_master_get_config_negative1() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_reply(false);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative2() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());
    }

    #[test]
    fn test_master_get_config_negative3() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative4() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0x101;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative5() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = (MAX_MSG_SIZE + 1) as u32;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative6() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.size = 6;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_set_mem_table_failure() {
        let (master, _peer) = create_pair2();

        master.set_mem_table(&[]).unwrap_err();
        let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
        master.set_mem_table(&tables).unwrap_err();
    }
}