// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Define communication messages for the vhost-user protocol.
//!
//! For message definition, please refer to the [vhost-user spec](https://qemu.readthedocs.io/en/latest/interop/vhost-user.html).

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]

use std::fmt::Debug;
use std::fs::File;
use std::io;
use std::marker::PhantomData;
use std::ops::Deref;

use vm_memory::{mmap::NewBitmap, ByteValued, Error as MmapError, FileOffset, MmapRegion};

#[cfg(feature = "xen")]
use vm_memory::{GuestAddress, MmapRange, MmapXenFlags};

use super::{Error, Result};
use crate::VringConfigData;

/// The vhost-user specification uses a field of u32 to store message length.
/// On the other hand, preallocated buffers are needed to receive messages from the Unix domain
/// socket. Preallocating a 4GB buffer for each vhost-user message would be pure overhead.
/// Among all defined vhost-user messages, only VhostUserConfig and VhostUserMemory have variable
/// message size. For VhostUserConfig, a maximum size of 4K is enough because the user
/// configuration space for virtio devices is (4K - 0x100) bytes at most. For VhostUserMemory,
/// 4K should be enough too because it can support 255 memory regions at most.
pub const MAX_MSG_SIZE: usize = 0x1000;

/// The VhostUserMemory message has a variable message size and a variable number of attached file
/// descriptors. Each user memory region entry in the message payload occupies 32 bytes, so the
/// maximum number of attached file descriptors could be derived from the maximum message size.
/// But Rust only implements the Default and AsMut traits for arrays with 0 - 32 entries, so the
/// maximum number is further reduced to 32.
// pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32;
pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;

/// Starting position (inclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100;

/// Ending position (exclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000;

/// Maximum number of vrings supported.
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;

pub(super) trait Req:
    Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Send + Sync + Into<u32>
{
    fn is_valid(value: u32) -> bool;
}

/// Type of requests sent from masters to slaves.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum MasterReq {
    /// Null operation.
    NOOP = 0,
    /// Get from the underlying vhost implementation the features bit mask.
    GET_FEATURES = 1,
    /// Enable features in the underlying vhost implementation using a bit mask.
    SET_FEATURES = 2,
    /// Set the current Master as an owner of the session.
    SET_OWNER = 3,
    /// No longer used.
    RESET_OWNER = 4,
    /// Set the memory map regions on the slave so it can translate the vring addresses.
    SET_MEM_TABLE = 5,
    /// Set logging shared memory space.
    SET_LOG_BASE = 6,
    /// Set the logging file descriptor, which is passed as ancillary data.
    SET_LOG_FD = 7,
    /// Set the size of the queue.
    SET_VRING_NUM = 8,
    /// Set the addresses of the different aspects of the vring.
    SET_VRING_ADDR = 9,
    /// Set the base offset in the available vring.
    SET_VRING_BASE = 10,
    /// Get the available vring base offset.
    GET_VRING_BASE = 11,
    /// Set the event file descriptor for adding buffers to the vring.
    SET_VRING_KICK = 12,
    /// Set the event file descriptor to signal when buffers are used.
    SET_VRING_CALL = 13,
    /// Set the event file descriptor to signal when an error occurs.
    SET_VRING_ERR = 14,
    /// Get the protocol feature bit mask from the underlying vhost implementation.
    GET_PROTOCOL_FEATURES = 15,
    /// Enable protocol features in the underlying vhost implementation.
    SET_PROTOCOL_FEATURES = 16,
    /// Query how many queues the backend supports.
    GET_QUEUE_NUM = 17,
    /// Signal slave to enable or disable the corresponding vring.
    SET_VRING_ENABLE = 18,
    /// Ask the vhost-user backend to broadcast a fake RARP to notify that the migration is
    /// terminated for guests that do not support GUEST_ANNOUNCE.
    SEND_RARP = 19,
    /// Set host MTU value exposed to the guest.
    NET_SET_MTU = 20,
    /// Set the socket file descriptor for slave initiated requests.
    SET_SLAVE_REQ_FD = 21,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 22,
    /// Set the endianness of a VQ for legacy devices.
    SET_VRING_ENDIAN = 23,
    /// Fetch the contents of the virtio device configuration space.
    GET_CONFIG = 24,
    /// Change the contents of the virtio device configuration space.
    SET_CONFIG = 25,
    /// Create a session for crypto operation.
    CREATE_CRYPTO_SESSION = 26,
    /// Close a session for crypto operation.
    CLOSE_CRYPTO_SESSION = 27,
    /// Advise slave that a migration with postcopy enabled is underway.
    POSTCOPY_ADVISE = 28,
    /// Advise slave that a transition to postcopy mode has happened.
    POSTCOPY_LISTEN = 29,
    /// Advise that postcopy migration has now completed.
    POSTCOPY_END = 30,
    /// Get a shared buffer from slave.
    GET_INFLIGHT_FD = 31,
    /// Send the shared inflight buffer back to slave.
    SET_INFLIGHT_FD = 32,
    /// Sets the GPU protocol socket file descriptor.
    GPU_SET_SOCKET = 33,
    /// Ask the vhost user backend to disable all rings and reset all internal
    /// device state to the initial state.
    RESET_DEVICE = 34,
    /// Indicate that a buffer was added to the vring instead of signalling it
    /// using the vring's kick file descriptor.
    VRING_KICK = 35,
    /// Return a u64 payload containing the maximum number of memory slots.
    GET_MAX_MEM_SLOTS = 36,
    /// Update the memory tables by adding the region described.
    ADD_MEM_REG = 37,
    /// Update the memory tables by removing the region described.
    REM_MEM_REG = 38,
    /// Notify the backend with updated device status as defined in the VIRTIO
    /// specification.
    SET_STATUS = 39,
    /// Query the backend for its device status as defined in the VIRTIO
    /// specification.
    GET_STATUS = 40,
    /// Upper bound of valid commands.
    MAX_CMD = 41,
}

impl From<MasterReq> for u32 {
    fn from(req: MasterReq) -> u32 {
        req as u32
    }
}

impl Req for MasterReq {
    fn is_valid(value: u32) -> bool {
        (value > MasterReq::NOOP as u32) && (value < MasterReq::MAX_CMD as u32)
    }
}

/// Type of requests sent from slaves to masters.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum SlaveReq {
    /// Null operation.
    NOOP = 0,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 1,
    /// Notify that the virtio device's configuration space has changed.
    CONFIG_CHANGE_MSG = 2,
    /// Set host notifier for a specified queue.
    VRING_HOST_NOTIFIER_MSG = 3,
    /// Indicate that a buffer was used from the vring.
    VRING_CALL = 4,
    /// Indicate that an error occurred on the specific vring.
    VRING_ERR = 5,
    /// Virtio-fs draft: map file content into the window.
    FS_MAP = 6,
    /// Virtio-fs draft: unmap file content from the window.
    FS_UNMAP = 7,
    /// Virtio-fs draft: sync file content.
    FS_SYNC = 8,
    /// Virtio-fs draft: perform a read/write from an fd directly to GPA.
    FS_IO = 9,
    /// Upper bound of valid commands.
    MAX_CMD = 10,
}

impl From<SlaveReq> for u32 {
    fn from(req: SlaveReq) -> u32 {
        req as u32
    }
}

impl Req for SlaveReq {
    fn is_valid(value: u32) -> bool {
        (value > SlaveReq::NOOP as u32) && (value < SlaveReq::MAX_CMD as u32)
    }
}

/// Vhost message validator.
pub trait VhostUserMsgValidator {
    /// Validate message syntax only.
    /// It doesn't validate message semantics such as protocol version number and dependency
    /// on feature flags etc.
    fn is_valid(&self) -> bool {
        true
    }
}

// Bit mask for common message flags.
bitflags! {
    /// Common message flags for vhost-user requests and replies.
    pub struct VhostUserHeaderFlag: u32 {
        /// Bits [0..2] are the message version number.
        const VERSION = 0x3;
        /// Mark message as reply.
        const REPLY = 0x4;
        /// Sender anticipates a reply message from the peer.
        const NEED_REPLY = 0x8;
        /// All valid bits.
        const ALL_FLAGS = 0xc;
        /// All reserved bits.
        const RESERVED_BITS = !0xf;
    }
}

/// Common message header for vhost-user requests and replies.
/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
/// machine native byte order.
#[repr(packed)]
#[derive(Copy)]
pub(super) struct VhostUserMsgHeader<R: Req> {
    request: u32,
    flags: u32,
    size: u32,
    _r: PhantomData<R>,
}

impl<R: Req> Debug for VhostUserMsgHeader<R> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("VhostUserMsgHeader")
            .field("request", &{ self.request })
            .field("flags", &{ self.flags })
            .field("size", &{ self.size })
            .finish()
    }
}

impl<R: Req> Clone for VhostUserMsgHeader<R> {
    fn clone(&self) -> VhostUserMsgHeader<R> {
        *self
    }
}

impl<R: Req> PartialEq for VhostUserMsgHeader<R> {
    fn eq(&self, other: &Self) -> bool {
        self.request == other.request && self.flags == other.flags && self.size == other.size
    }
}

impl<R: Req> VhostUserMsgHeader<R> {
    /// Create a new instance of `VhostUserMsgHeader`.
    pub fn new(request: R, flags: u32, size: u32) -> Self {
        // Default to protocol version 1
        let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1;
        VhostUserMsgHeader {
            request: request.into(),
            flags: fl,
            size,
            _r: PhantomData,
        }
    }

    /// Get message type.
    pub fn get_code(&self) -> Result<R> {
        if R::is_valid(self.request) {
            // SAFETY: It's safe because R is marked as repr(u32), and the value is valid.
            Ok(unsafe { std::mem::transmute_copy::<u32, R>(&{ self.request }) })
        } else {
            Err(Error::InvalidMessage)
        }
    }

    /// Set message type.
    pub fn set_code(&mut self, request: R) {
        self.request = request.into();
    }

    /// Get message version number.
    pub fn get_version(&self) -> u32 {
        self.flags & 0x3
    }

    /// Set message version number.
    pub fn set_version(&mut self, ver: u32) {
        self.flags &= !0x3;
        self.flags |= ver & 0x3;
    }

    /// Check whether it's a reply message.
    pub fn is_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0
    }

    /// Mark message as reply.
    pub fn set_reply(&mut self, is_reply: bool) {
        if is_reply {
            self.flags |= VhostUserHeaderFlag::REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::REPLY.bits();
        }
    }

    /// Check whether reply for this message is requested.
    pub fn is_need_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0
    }

    /// Mark that reply for this message is needed.
    pub fn set_need_reply(&mut self, need_reply: bool) {
        if need_reply {
            self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits();
        }
    }

    /// Check whether it's the reply message for the request `req`.
    pub fn is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool {
        if let (Ok(code1), Ok(code2)) = (self.get_code(), req.get_code()) {
            self.is_reply() && !req.is_reply() && code1 == code2
        } else {
            false
        }
    }

    /// Get message size.
    pub fn get_size(&self) -> u32 {
        self.size
    }

    /// Set message size.
    pub fn set_size(&mut self, size: u32) {
        self.size = size;
    }
}

impl<R: Req> Default for VhostUserMsgHeader<R> {
    fn default() -> Self {
        VhostUserMsgHeader {
            request: 0,
            flags: 0x1,
            size: 0,
            _r: PhantomData,
        }
    }
}

// SAFETY: Safe because all fields of VhostUserMsgHeader are POD.
unsafe impl<R: Req> ByteValued for VhostUserMsgHeader<R> {}

impl<T: Req> VhostUserMsgValidator for VhostUserMsgHeader<T> {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if self.get_code().is_err() {
            return false;
        } else if self.size as usize > MAX_MSG_SIZE {
            return false;
        } else if self.get_version() != 0x1 {
            return false;
        } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 {
            return false;
        }
        true
    }
}

// Bit mask for transport specific flags in VirtIO feature set defined by vhost-user.
bitflags! {
    /// Transport specific flags in VirtIO feature set defined by vhost-user.
    pub struct VhostUserVirtioFeatures: u64 {
        /// Feature flag for the protocol feature.
        const PROTOCOL_FEATURES = 0x4000_0000;
    }
}

// Bit mask for vhost-user protocol feature flags.
bitflags! {
    /// Vhost-user protocol feature flags.
    pub struct VhostUserProtocolFeatures: u64 {
        /// Support multiple queues.
        const MQ = 0x0000_0001;
        /// Support logging through shared memory fd.
        const LOG_SHMFD = 0x0000_0002;
        /// Support broadcasting fake RARP packet.
        const RARP = 0x0000_0004;
        /// Support sending reply messages for requests with NEED_REPLY flag set.
        const REPLY_ACK = 0x0000_0008;
        /// Support setting MTU for virtio-net devices.
        const MTU = 0x0000_0010;
        /// Allow the slave to send requests to the master by an optional communication channel.
        const SLAVE_REQ = 0x0000_0020;
        /// Support setting slave endian by SET_VRING_ENDIAN.
        const CROSS_ENDIAN = 0x0000_0040;
        /// Support crypto operations.
        const CRYPTO_SESSION = 0x0000_0080;
        /// Support sending userfault_fd from slaves to masters.
        const PAGEFAULT = 0x0000_0100;
        /// Support Virtio device configuration.
        const CONFIG = 0x0000_0200;
        /// Allow the slave to send fds (at most 8 descriptors in each message) to the master.
        const SLAVE_SEND_FD = 0x0000_0400;
        /// Allow the slave to register a host notifier.
        const HOST_NOTIFIER = 0x0000_0800;
        /// Support inflight shmfd.
        const INFLIGHT_SHMFD = 0x0000_1000;
        /// Support resetting the device.
        const RESET_DEVICE = 0x0000_2000;
        /// Support inband notifications.
        const INBAND_NOTIFICATIONS = 0x0000_4000;
        /// Support configuring memory slots.
        const CONFIGURE_MEM_SLOTS = 0x0000_8000;
        /// Support reporting status.
        const STATUS = 0x0001_0000;
        /// Support Xen mmap.
        const XEN_MMAP = 0x0002_0000;
    }
}

/// A generic message to encapsulate a 64-bit value.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserU64 {
    /// The encapsulated 64-bit common value.
    pub value: u64,
}

impl VhostUserU64 {
    /// Create a new instance.
    pub fn new(value: u64) -> Self {
        VhostUserU64 { value }
    }
}

// SAFETY: Safe because all fields of VhostUserU64 are POD.
unsafe impl ByteValued for VhostUserU64 {}

impl VhostUserMsgValidator for VhostUserU64 {}

/// Memory region descriptor for the SET_MEM_TABLE request.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserMemory {
    /// Number of memory regions in the payload.
    pub num_regions: u32,
    /// Padding for alignment.
    pub padding1: u32,
}

impl VhostUserMemory {
    /// Create a new instance.
    pub fn new(cnt: u32) -> Self {
        VhostUserMemory {
            num_regions: cnt,
            padding1: 0,
        }
    }
}

// SAFETY: Safe because all fields of VhostUserMemory are POD.
unsafe impl ByteValued for VhostUserMemory {}

impl VhostUserMsgValidator for VhostUserMemory {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if self.padding1 != 0 {
            return false;
        } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 {
            return false;
        }
        true
    }
}

/// Memory region descriptors as payload for the SET_MEM_TABLE request.
#[repr(packed)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserMemoryRegion {
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where region starts in the mapped memory.
    pub mmap_offset: u64,

    #[cfg(feature = "xen")]
    /// Xen specific flags.
    pub xen_mmap_flags: u32,

    #[cfg(feature = "xen")]
    /// Xen specific data.
    pub xen_mmap_data: u32,
}

impl VhostUserMemoryRegion {
    fn is_valid_common(&self) -> bool {
        self.memory_size != 0
            && self.guest_phys_addr.checked_add(self.memory_size).is_some()
            && self.user_addr.checked_add(self.memory_size).is_some()
            && self.mmap_offset.checked_add(self.memory_size).is_some()
    }
}

#[cfg(not(feature = "xen"))]
impl VhostUserMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserMemoryRegion {
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }

    /// Creates mmap region from Self.
    pub fn mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>> {
        MmapRegion::<B>::from_file(
            FileOffset::new(file, self.mmap_offset),
            self.memory_size as usize,
        )
        .map_err(MmapError::MmapRegion)
        .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)))
    }

    fn is_valid(&self) -> bool {
        self.is_valid_common()
    }
}

#[cfg(feature = "xen")]
impl VhostUserMemoryRegion {
    /// Create a new instance.
    pub fn with_xen(
        guest_phys_addr: u64,
        memory_size: u64,
        user_addr: u64,
        mmap_offset: u64,
        xen_mmap_flags: u32,
        xen_mmap_data: u32,
    ) -> Self {
        VhostUserMemoryRegion {
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
            xen_mmap_flags,
            xen_mmap_data,
        }
    }

    /// Creates mmap region from Self.
    pub fn mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>> {
        let range = MmapRange::new(
            self.memory_size as usize,
            Some(FileOffset::new(file, self.mmap_offset)),
            GuestAddress(self.guest_phys_addr),
            self.xen_mmap_flags,
            self.xen_mmap_data,
        );

        MmapRegion::<B>::from_range(range)
            .map_err(MmapError::MmapRegion)
            .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)))
    }

    fn is_valid(&self) -> bool {
        if !self.is_valid_common() {
            false
        } else {
            // Only one of FOREIGN or GRANT should be set.
            match MmapXenFlags::from_bits(self.xen_mmap_flags) {
                Some(flags) => flags.is_valid(),
                None => false,
            }
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemoryRegion {
    fn is_valid(&self) -> bool {
        self.is_valid()
    }
}

/// Payload of the VhostUserMemory message.
pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;

/// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
/// requests.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserSingleMemoryRegion {
    /// Padding for correct alignment
    padding: u64,
    /// General memory region
    region: VhostUserMemoryRegion,
}

impl Deref for VhostUserSingleMemoryRegion {
    type Target = VhostUserMemoryRegion;

    fn deref(&self) -> &VhostUserMemoryRegion {
        &self.region
    }
}

#[cfg(not(feature = "xen"))]
impl VhostUserSingleMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserSingleMemoryRegion {
            padding: 0,
            region: VhostUserMemoryRegion::new(
                guest_phys_addr,
                memory_size,
                user_addr,
                mmap_offset,
            ),
        }
    }
}

#[cfg(feature = "xen")]
impl VhostUserSingleMemoryRegion {
    /// Create a new instance.
    pub fn new(
        guest_phys_addr: u64,
        memory_size: u64,
        user_addr: u64,
        mmap_offset: u64,
        xen_mmap_flags: u32,
        xen_mmap_data: u32,
    ) -> Self {
        VhostUserSingleMemoryRegion {
            padding: 0,
            region: VhostUserMemoryRegion::with_xen(
                guest_phys_addr,
                memory_size,
                user_addr,
                mmap_offset,
                xen_mmap_flags,
                xen_mmap_data,
            ),
        }
    }
}

// SAFETY: Safe because all fields of VhostUserSingleMemoryRegion are POD.
unsafe impl ByteValued for VhostUserSingleMemoryRegion {}
impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {}

/// Vring state descriptor.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserVringState {
    /// Vring index.
    pub index: u32,
    /// A common 32bit value to encapsulate vring state etc.
    pub num: u32,
}

impl VhostUserVringState {
    /// Create a new instance.
    pub fn new(index: u32, num: u32) -> Self {
        VhostUserVringState { index, num }
    }
}

// SAFETY: Safe because all fields of VhostUserVringState are POD.
unsafe impl ByteValued for VhostUserVringState {}

impl VhostUserMsgValidator for VhostUserVringState {}

// Bit mask for vring address flags.
bitflags! {
    /// Flags for vring address.
    pub struct VhostUserVringAddrFlags: u32 {
        /// Support log of vring operations.
        /// Modifications to "used" vring should be logged.
        const VHOST_VRING_F_LOG = 0x1;
    }
}

/// Vring address descriptor.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserVringAddr {
    /// Vring index.
    pub index: u32,
    /// Vring flags defined by VhostUserVringAddrFlags.
    pub flags: u32,
    /// Ring address of the vring descriptor table.
    pub descriptor: u64,
    /// Ring address of the vring used ring.
    pub used: u64,
    /// Ring address of the vring available ring.
    pub available: u64,
    /// Guest address for logging.
    pub log: u64,
}

impl VhostUserVringAddr {
    /// Create a new instance.
    pub fn new(
        index: u32,
        flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        log: u64,
    ) -> Self {
        VhostUserVringAddr {
            index,
            flags: flags.bits(),
            descriptor,
            used,
            available,
            log,
        }
    }

    /// Create a new instance from `VringConfigData`.
    #[cfg_attr(feature = "cargo-clippy", allow(clippy::useless_conversion))]
    pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
        let log_addr = config_data.log_addr.unwrap_or(0);
        VhostUserVringAddr {
            index,
            flags: config_data.flags,
            descriptor: config_data.desc_table_addr,
            used: config_data.used_ring_addr,
            available: config_data.avail_ring_addr,
            log: log_addr,
        }
    }
}

// SAFETY: Safe because all fields of VhostUserVringAddr are POD.
unsafe impl ByteValued for VhostUserVringAddr {}

impl VhostUserMsgValidator for VhostUserVringAddr {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 {
            return false;
        } else if self.descriptor & 0xf != 0 {
            return false;
        } else if self.available & 0x1 != 0 {
            return false;
        } else if self.used & 0x3 != 0 {
            return false;
        }
        true
    }
}

// Bit mask for the vhost-user device configuration message.
bitflags! {
    /// Flags for the device configuration message.
    pub struct VhostUserConfigFlags: u32 {
        /// Vhost master messages used for writable fields.
        const WRITABLE = 0x1;
        /// Vhost master messages used for live migration.
        const LIVE_MIGRATION = 0x2;
    }
}

/// Message to read/write device configuration space.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserConfig {
    /// Offset of virtio device's configuration space.
    pub offset: u32,
    /// Configuration space access size in bytes.
    pub size: u32,
    /// Flags for the device configuration operation.
    pub flags: u32,
}

impl VhostUserConfig {
    /// Create a new instance.
    pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self {
        VhostUserConfig {
            offset,
            size,
            flags: flags.bits(),
        }
    }
}

// SAFETY: Safe because all fields of VhostUserConfig are POD.
unsafe impl ByteValued for VhostUserConfig {}

impl VhostUserMsgValidator for VhostUserConfig {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        let end_addr = match self.size.checked_add(self.offset) {
            Some(addr) => addr,
            None => return false,
        };
        if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 {
            return false;
        } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE {
            return false;
        }
        true
    }
}

/// Payload for the VhostUserConfig message.
pub type VhostUserConfigPayload = Vec<u8>;

/// Inflight I/O tracking area descriptor as payload for the GET_INFLIGHT_FD and
/// SET_INFLIGHT_FD requests.
#[repr(C)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserInflight {
    /// Size of the area to track inflight I/O.
    pub mmap_size: u64,
    /// Offset of this area from the start of the supplied file descriptor.
    pub mmap_offset: u64,
    /// Number of virtqueues.
    pub num_queues: u16,
    /// Size of virtqueues.
    pub queue_size: u16,
}

impl VhostUserInflight {
    /// Create a new instance.
    pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self {
        VhostUserInflight {
            mmap_size,
            mmap_offset,
            num_queues,
            queue_size,
        }
    }
}

// SAFETY: Safe because all fields of VhostUserInflight are POD.
unsafe impl ByteValued for VhostUserInflight {}

impl VhostUserMsgValidator for VhostUserInflight {
    fn is_valid(&self) -> bool {
        if self.num_queues == 0 || self.queue_size == 0 {
            return false;
        }
        true
    }
}

/// Memory area descriptor for the dirty-page log, used as payload for the SET_LOG_BASE request.
#[repr(C)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserLog {
    /// Size of the area to log dirty pages.
    pub mmap_size: u64,
    /// Offset of this area from the start of the supplied file descriptor.
    pub mmap_offset: u64,
}

impl VhostUserLog {
    /// Create a new instance.
    pub fn new(mmap_size: u64, mmap_offset: u64) -> Self {
        VhostUserLog {
            mmap_size,
            mmap_offset,
        }
    }
}

// SAFETY: Safe because all fields of VhostUserLog are POD.
unsafe impl ByteValued for VhostUserLog {}

impl VhostUserMsgValidator for VhostUserLog {
    fn is_valid(&self) -> bool {
        if self.mmap_size == 0 || self.mmap_offset.checked_add(self.mmap_size).is_none() {
            return false;
        }
        true
    }
}

/*
 * TODO: support dirty log, live migration and IOTLB operations.
#[repr(packed)]
pub struct VhostUserVringArea {
    pub index: u32,
    pub flags: u32,
    pub size: u64,
    pub offset: u64,
}

#[repr(packed)]
pub struct VhostUserLog {
    pub size: u64,
    pub offset: u64,
}

#[repr(packed)]
pub struct VhostUserIotlb {
    pub iova: u64,
    pub size: u64,
    pub user_addr: u64,
    pub permission: u8,
    pub optype: u8,
}
*/

// Bit mask for flags in virtio-fs slave messages
bitflags! {
    #[derive(Default)]
    /// Flags for virtio-fs slave messages.
    pub struct VhostUserFSSlaveMsgFlags: u64 {
        /// Empty permission.
        const EMPTY = 0x0;
        /// Read permission.
        const MAP_R = 0x1;
        /// Write permission.
        const MAP_W = 0x2;
    }
}

/// Max entries in one virtio-fs slave request.
pub const VHOST_USER_FS_SLAVE_ENTRIES: usize = 8;

/// Slave request message to update the MMIO window.
#[repr(packed)]
#[derive(Copy, Clone, Default)]
pub struct VhostUserFSSlaveMsg {
    /// File offset.
    pub fd_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Offset into the DAX window.
    pub cache_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Size of region to map.
    pub len: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Flags for the mmap operation
    pub flags: [VhostUserFSSlaveMsgFlags; VHOST_USER_FS_SLAVE_ENTRIES],
}

// SAFETY: Safe because all fields of VhostUserFSSlaveMsg are POD.
unsafe impl ByteValued for VhostUserFSSlaveMsg {}

impl VhostUserMsgValidator for VhostUserFSSlaveMsg {
    fn is_valid(&self) -> bool {
        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            if ({ self.flags[i] }.bits() & !VhostUserFSSlaveMsgFlags::all().bits()) != 0
                || self.fd_offset[i].checked_add(self.len[i]).is_none()
                || self.cache_offset[i].checked_add(self.len[i]).is_none()
            {
                return false;
            }
        }
        true
    }
}

/// Inflight I/O descriptor state for split virtqueues
#[repr(packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStateSplit {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: [u8; 5],
    /// List of last batch of used descriptors, only when batching is used for submitting
    pub next: u16,
    /// Preserve order of fetching available descriptors, only for head descriptor
    pub counter: u64,
}

impl DescStateSplit {
    /// New instance of DescStateSplit struct
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for split virtqueues
#[repr(packed)]
pub struct QueueRegionSplit {
    /// Features flags of this region
    pub features: u64,
    /// Version of this region
    pub version: u16,
    /// Number of DescStateSplit entries
    pub desc_num: u16,
    /// List to track last batch of used descriptors
    pub last_batch_head: u16,
    /// Idx value of used ring
    pub used_idx: u16,
    /// Pointer to an array of DescStateSplit entries
    pub desc: u64,
}

impl QueueRegionSplit {
    /// New instance of QueueRegionSplit struct
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionSplit {
            features,
            version: 1,
            desc_num: queue_size,
            last_batch_head: 0,
            used_idx: 0,
            desc: 0,
        }
    }
}

/// Inflight I/O descriptor state for packed virtqueues
#[repr(packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStatePacked {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: u8,
    /// Link to next free entry
    pub next: u16,
    /// Link to last entry of descriptor list, only for head
    pub last: u16,
    /// Length of descriptor list, only for head
    pub num: u16,
    /// Preserve order of fetching avail descriptors, only for head
    pub counter: u64,
    /// Buffer ID
    pub id: u16,
    /// Descriptor flags
    pub flags: u16,
    /// Buffer length
    pub len: u32,
    /// Buffer address
    pub addr: u64,
}

impl DescStatePacked {
    /// New instance of DescStatePacked struct
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for packed virtqueues
#[repr(packed)]
pub struct QueueRegionPacked {
    /// Features flags of this region
    pub features: u64,
    /// Version of this region
    pub version: u16,
    /// Size of descriptor state array
    pub desc_num: u16,
    /// Head of free DescStatePacked entry list
    pub free_head: u16,
    /// Old head of free DescStatePacked entry list
    pub old_free_head: u16,
    /// Used idx of descriptor ring
    pub used_idx: u16,
    /// Old used idx of descriptor ring
    pub old_used_idx: u16,
    /// Device ring wrap counter
    pub used_wrap_counter: u8,
    /// Old device ring wrap counter
    pub old_used_wrap_counter: u8,
    /// Padding
    padding: [u8; 7],
    /// Pointer to array tracking state of each descriptor from descriptor ring
    pub desc: u64,
}

impl QueueRegionPacked {
    /// New instance of QueueRegionPacked struct
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionPacked {
            features,
            version: 1,
            desc_num: queue_size,
            free_head: 0,
            old_free_head: 0,
            used_idx: 0,
            old_used_idx: 0,
            used_wrap_counter: 0,
            old_used_wrap_counter: 0,
            padding: [0; 7],
            desc: 0,
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::mem;

    #[cfg(feature = "xen")]
    impl VhostUserMemoryRegion {
        fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
            Self::with_xen(
                guest_phys_addr,
                memory_size,
                user_addr,
                mmap_offset,
                MmapXenFlags::FOREIGN.bits(),
                0,
            )
        }
    }

    #[test]
    fn check_master_request_code() {
        assert!(!MasterReq::is_valid(MasterReq::NOOP as _));
        assert!(!MasterReq::is_valid(MasterReq::MAX_CMD as _));
        assert!(MasterReq::MAX_CMD > MasterReq::NOOP);
        let code = MasterReq::GET_FEATURES;
        assert!(MasterReq::is_valid(code as _));
        assert_eq!(code, code.clone());
        assert!(!MasterReq::is_valid(10000));
    }

    #[test]
    fn check_slave_request_code() {
        assert!(!SlaveReq::is_valid(SlaveReq::NOOP as _));
        assert!(!SlaveReq::is_valid(SlaveReq::MAX_CMD as _));
        assert!(SlaveReq::MAX_CMD > SlaveReq::NOOP);
        let code = SlaveReq::CONFIG_CHANGE_MSG;
        assert!(SlaveReq::is_valid(code as _));
        assert_eq!(code, code.clone());
        assert!(!SlaveReq::is_valid(10000));
    }
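
    // A minimal additional check (an illustrative sketch using only the request
    // enums defined above, with a test name chosen here for illustration): the
    // `From<_> for u32` impls should yield the raw command codes assigned in the
    // enum definitions.
    #[test]
    fn check_request_code_conversions() {
        assert_eq!(u32::from(MasterReq::GET_FEATURES), 1);
        assert_eq!(u32::from(MasterReq::SET_STATUS), 39);
        assert_eq!(u32::from(SlaveReq::CONFIG_CHANGE_MSG), 2);
        assert_eq!(u32::from(SlaveReq::FS_IO), 9);
    }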

    #[test]
    fn msg_header_ops() {
        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, 0x100);
        assert_eq!(hdr.get_code().unwrap(), MasterReq::GET_FEATURES);
        hdr.set_code(MasterReq::SET_FEATURES);
        assert_eq!(hdr.get_code().unwrap(), MasterReq::SET_FEATURES);

        assert_eq!(hdr.get_version(), 0x1);

        assert!(!hdr.is_reply());
        hdr.set_reply(true);
        assert!(hdr.is_reply());
        hdr.set_reply(false);

        assert!(!hdr.is_need_reply());
        hdr.set_need_reply(true);
        assert!(hdr.is_need_reply());
        hdr.set_need_reply(false);

        assert_eq!(hdr.get_size(), 0x100);
        hdr.set_size(0x200);
        assert_eq!(hdr.get_size(), 0x200);

        assert!(!hdr.is_need_reply());
        assert!(!hdr.is_reply());
        assert_eq!(hdr.get_version(), 0x1);

        // Check message length
        assert!(hdr.is_valid());
        hdr.set_size(0x2000);
        assert!(!hdr.is_valid());
        hdr.set_size(0x100);
        assert_eq!(hdr.get_size(), 0x100);
        assert!(hdr.is_valid());
        hdr.set_size((MAX_MSG_SIZE - mem::size_of::<VhostUserMsgHeader<MasterReq>>()) as u32);
        assert!(hdr.is_valid());
        hdr.set_size(0x0);
        assert!(hdr.is_valid());

        // Check version
        hdr.set_version(0x0);
        assert!(!hdr.is_valid());
        hdr.set_version(0x2);
        assert!(!hdr.is_valid());
        hdr.set_version(0x1);
        assert!(hdr.is_valid());

        // Test Debug, Clone and PartialEq traits
        assert_eq!(hdr, hdr.clone());
        assert_eq!(hdr.clone().get_code().unwrap(), hdr.get_code().unwrap());
        assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr));
    }

    #[test]
    fn test_vhost_user_message_u64() {
        let val = VhostUserU64::default();
        let val1 = VhostUserU64::new(0);

        let a = val.value;
        let b = val1.value;
        assert_eq!(a, b);
        let a = VhostUserU64::new(1).value;
        assert_eq!(a, 1);
    }

    #[test]
    fn check_user_memory() {
        let mut msg = VhostUserMemory::new(1);
        assert!(msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        assert!(msg.is_valid());

        msg.num_regions += 1;
        assert!(!msg.is_valid());
        msg.num_regions = 0xFFFFFFFF;
        assert!(!msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        msg.padding1 = 1;
        assert!(!msg.is_valid());
    }

    #[test]
    fn check_user_memory_region() {
        let mut msg = VhostUserMemoryRegion::new(0, 0x1000, 0, 0);
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF;
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFF000;
        assert!(!msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFF0000;
        msg.memory_size = 0;
        assert!(!msg.is_valid());
        let a = msg.guest_phys_addr;
        let b = msg.guest_phys_addr;
        assert_eq!(a, b);

        let msg = VhostUserMemoryRegion::default();
        let a = msg.guest_phys_addr;
        assert_eq!(a, 0);
        let a = msg.memory_size;
        assert_eq!(a, 0);
        let a = msg.user_addr;
        assert_eq!(a, 0);
        let a = msg.mmap_offset;
        assert_eq!(a, 0);
    }

    #[test]
    fn test_vhost_user_state() {
        let state = VhostUserVringState::new(5, 8);

        let a = state.index;
        assert_eq!(a, 5);
        let a = state.num;
        assert_eq!(a, 8);
        assert!(state.is_valid());

        let state = VhostUserVringState::default();
        let a = state.index;
        assert_eq!(a, 0);
        let a = state.num;
        assert_eq!(a, 0);
        assert!(state.is_valid());
    }
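
    // Illustrative sketches of the validators for the inflight-tracking and
    // dirty-log messages defined above; the test names and chosen values are
    // arbitrary examples, not taken from the vhost-user spec.
    #[test]
    fn check_vhost_user_inflight_msg() {
        let mut msg = VhostUserInflight::new(0x100000, 0, 2, 256);
        assert!(msg.is_valid());
        // A zero queue count or queue size makes the message invalid.
        msg.num_queues = 0;
        assert!(!msg.is_valid());
        msg.num_queues = 2;
        msg.queue_size = 0;
        assert!(!msg.is_valid());
    }

    #[test]
    fn check_vhost_user_log_msg() {
        let mut msg = VhostUserLog::new(0x1000, 0x10);
        assert!(msg.is_valid());
        // A zero-sized log area is rejected.
        msg.mmap_size = 0;
        assert!(!msg.is_valid());
        // An offset + size overflow is rejected as well.
        msg.mmap_size = 0x1000;
        msg.mmap_offset = u64::MAX;
        assert!(!msg.is_valid());
    }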

    #[test]
    fn test_vhost_user_addr() {
        let mut addr = VhostUserVringAddr::new(
            2,
            VhostUserVringAddrFlags::VHOST_VRING_F_LOG,
            0x1000,
            0x2000,
            0x3000,
            0x4000,
        );

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());

        addr.descriptor = 0x1001;
        assert!(!addr.is_valid());
        addr.descriptor = 0x1000;

        addr.available = 0x3001;
        assert!(!addr.is_valid());
        addr.available = 0x3000;

        addr.used = 0x2001;
        assert!(!addr.is_valid());
        addr.used = 0x2000;
        assert!(addr.is_valid());
    }

    #[test]
    fn test_vhost_user_state_from_config() {
        let config = VringConfigData {
            queue_max_size: 256,
            queue_size: 128,
            flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits,
            desc_table_addr: 0x1000,
            used_ring_addr: 0x2000,
            avail_ring_addr: 0x3000,
            log_addr: Some(0x4000),
        };
        let addr = VhostUserVringAddr::from_config_data(2, &config);

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());
    }

    #[test]
    fn check_user_vring_addr() {
        let mut msg =
            VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0);
        assert!(msg.is_valid());

        msg.descriptor = 1;
        assert!(!msg.is_valid());
        msg.descriptor = 0;

        msg.available = 1;
        assert!(!msg.is_valid());
        msg.available = 0;

        msg.used = 1;
        assert!(!msg.is_valid());
        msg.used = 0;

        msg.flags |= 0x80000000;
        assert!(!msg.is_valid());
        msg.flags &= !0x80000000;
    }

    #[test]
    fn check_user_config_msg() {
        let mut msg =
            VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE);

        assert!(msg.is_valid());
        msg.size = 0;
        assert!(!msg.is_valid());
        msg.size = 1;
        assert!(msg.is_valid());
        msg.offset = u32::MAX;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE - 1;
        assert!(msg.is_valid());
        msg.size = 2;
        assert!(!msg.is_valid());
        msg.size = 1;
        msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits();
        assert!(msg.is_valid());
        msg.flags |= 0x4;
        assert!(!msg.is_valid());
    }

    #[test]
    fn test_vhost_user_fs_slave() {
        let mut fs_slave = VhostUserFSSlaveMsg::default();

        assert!(fs_slave.is_valid());

        fs_slave.fd_offset[0] = 0xffff_ffff_ffff_ffff;
        fs_slave.len[0] = 0x1;
        assert!(!fs_slave.is_valid());

        assert_ne!(
            VhostUserFSSlaveMsgFlags::MAP_R,
            VhostUserFSSlaveMsgFlags::MAP_W
        );
        assert_eq!(VhostUserFSSlaveMsgFlags::EMPTY.bits(), 0);
    }
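
    // Illustrative sketch of the inflight queue region constructors defined
    // above: both the split and packed regions start at version 1 with the
    // requested descriptor count. Field values are copied into locals before
    // asserting because the structs are repr(packed).
    #[test]
    fn check_inflight_queue_region_defaults() {
        let split = QueueRegionSplit::new(0, 256);
        let a = split.version;
        assert_eq!(a, 1);
        let a = split.desc_num;
        assert_eq!(a, 256);
        let a = split.desc;
        assert_eq!(a, 0);

        let packed = QueueRegionPacked::new(0, 256);
        let a = packed.version;
        assert_eq!(a, 1);
        let a = packed.desc_num;
        assert_eq!(a, 256);
        let a = packed.used_wrap_counter;
        assert_eq!(a, 0);
    }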
}