// Copyright (C) 2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause

use std::num::Wrapping;
use std::ops::Deref;
use std::sync::atomic::Ordering;
use std::sync::{Arc, Mutex, MutexGuard};

use vm_memory::GuestMemory;

use crate::{DescriptorChain, Error, Queue, QueueGuard, QueueT};

/// Struct to maintain information and manipulate state of a virtio queue in a multi-threaded
/// context.
///
/// # Example
///
/// ```rust
/// use virtio_queue::{Queue, QueueSync, QueueT};
/// use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};
///
/// let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
/// let mut queue = QueueSync::new(1024).unwrap();
///
/// // First, the driver sets up the queue; this setup is done via writes on the bus (PCI, MMIO).
/// queue.set_size(8);
/// queue.set_desc_table_address(Some(0x1000), None);
/// queue.set_avail_ring_address(Some(0x2000), None);
/// queue.set_used_ring_address(Some(0x3000), None);
/// queue.set_ready(true);
/// // The user should check if the queue is valid before starting to use it.
/// assert!(queue.is_valid(m.memory()));
///
/// // The memory object is not embedded in the `QueueSync`, so we have to pass it as a
/// // parameter to the methods that access the guest memory. Examples would be:
/// queue.add_used(m.memory(), 1, 0x100).unwrap();
/// queue.needs_notification(m.memory()).unwrap();
/// ```
#[derive(Clone, Debug)]
pub struct QueueSync {
    state: Arc<Mutex<Queue>>,
}

impl QueueSync {
    fn lock_state(&self) -> MutexGuard<Queue> {
        // Do not expect poisoned lock.
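        // A panic in a thread holding the lock would poison the mutex;
        // `unwrap()` then propagates the panic rather than touching possibly
        // inconsistent queue state.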
        self.state.lock().unwrap()
    }
}

impl<'a> QueueGuard<'a> for QueueSync {
    type G = MutexGuard<'a, Queue>;
}

impl QueueT for QueueSync {
    fn new(max_size: u16) -> Result<Self, Error> {
        Ok(QueueSync {
            state: Arc::new(Mutex::new(Queue::new(max_size)?)),
        })
    }

    fn is_valid<M: GuestMemory>(&self, mem: &M) -> bool {
        self.lock_state().is_valid(mem)
    }

    fn reset(&mut self) {
        self.lock_state().reset();
    }

    fn lock(&mut self) -> <Self as QueueGuard>::G {
        self.lock_state()
    }

    fn max_size(&self) -> u16 {
        self.lock_state().max_size()
    }

    fn size(&self) -> u16 {
        self.lock_state().size()
    }

    fn set_size(&mut self, size: u16) {
        self.lock_state().set_size(size);
    }

    fn ready(&self) -> bool {
        self.lock_state().ready()
    }

    fn set_ready(&mut self, ready: bool) {
        self.lock_state().set_ready(ready)
    }

    fn set_desc_table_address(&mut self, low: Option<u32>, high: Option<u32>) {
        self.lock_state().set_desc_table_address(low, high);
    }

    fn set_avail_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        self.lock_state().set_avail_ring_address(low, high);
    }

    fn set_used_ring_address(&mut self, low: Option<u32>, high: Option<u32>) {
        self.lock_state().set_used_ring_address(low, high);
    }

    fn set_event_idx(&mut self, enabled: bool) {
        self.lock_state().set_event_idx(enabled);
    }

    fn avail_idx<M>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error>
    where
        M: GuestMemory + ?Sized,
    {
        self.lock_state().avail_idx(mem, order)
    }

    fn used_idx<M: GuestMemory>(&self, mem: &M, order: Ordering) -> Result<Wrapping<u16>, Error> {
        self.lock_state().used_idx(mem, order)
    }

    fn add_used<M: GuestMemory>(
        &mut self,
        mem: &M,
        head_index: u16,
        len: u32,
    ) -> Result<(), Error> {
        self.lock_state().add_used(mem, head_index, len)
    }

    fn enable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.lock_state().enable_notification(mem)
    }

    fn disable_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<(), Error> {
        self.lock_state().disable_notification(mem)
    }

    fn needs_notification<M: GuestMemory>(&mut self, mem: &M) -> Result<bool, Error> {
        self.lock_state().needs_notification(mem)
    }

    fn next_avail(&self) -> u16 {
        self.lock_state().next_avail()
    }
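
    // Note: each of these methods acquires the internal mutex only for the
    // duration of a single call, so a sequence of calls is not atomic as a
    // whole; hold the guard returned by `lock()` across compound updates.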

    fn set_next_avail(&mut self, next_avail: u16) {
        self.lock_state().set_next_avail(next_avail);
    }

    fn next_used(&self) -> u16 {
        self.lock_state().next_used()
    }

    fn set_next_used(&mut self, next_used: u16) {
        self.lock_state().set_next_used(next_used);
    }

    fn desc_table(&self) -> u64 {
        self.lock_state().desc_table()
    }

    fn avail_ring(&self) -> u64 {
        self.lock_state().avail_ring()
    }

    fn used_ring(&self) -> u64 {
        self.lock_state().used_ring()
    }

    fn event_idx_enabled(&self) -> bool {
        self.lock_state().event_idx_enabled()
    }

    fn pop_descriptor_chain<M>(&mut self, mem: M) -> Option<DescriptorChain<M>>
    where
        M: Clone + Deref,
        M::Target: GuestMemory,
    {
        self.lock_state().pop_descriptor_chain(mem)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::defs::{DEFAULT_AVAIL_RING_ADDR, DEFAULT_DESC_TABLE_ADDR, DEFAULT_USED_RING_ADDR};
    use std::sync::Barrier;
    use virtio_bindings::bindings::virtio_ring::VRING_USED_F_NO_NOTIFY;
    use vm_memory::{Address, Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};

    #[test]
    fn test_queue_state_sync() {
        let mut q = QueueSync::new(0x1000).unwrap();
        let mut q2 = q.clone();
        let q3 = q.clone();
        let barrier = Arc::new(Barrier::new(3));
        let b2 = barrier.clone();
        let b3 = barrier.clone();

        let t1 = std::thread::spawn(move || {
            {
                let guard = q2.lock();
                assert!(!guard.ready());
            }
            b2.wait();
            b2.wait();
            {
                let guard = q2.lock();
                assert!(guard.ready());
            }
        });

        let t2 = std::thread::spawn(move || {
            assert!(!q3.ready());
            b3.wait();
            b3.wait();
            assert!(q3.ready());
        });

        barrier.wait();
        q.set_ready(true);
        barrier.wait();

        t1.join().unwrap();
        t2.join().unwrap();
    }

    #[test]
    fn test_state_sync_add_used() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut q = QueueSync::new(0x100).unwrap();

        q.set_desc_table_address(Some(0x1000), None);
        q.set_avail_ring_address(Some(0x2000), None);
        q.set_used_ring_address(Some(0x3000), None);
        q.set_event_idx(true);
        q.set_ready(true);
        assert!(q.is_valid(m.memory()));
        assert_eq!(q.lock().size(), 0x100);

        assert_eq!(q.max_size(), 0x100);
        assert_eq!(q.size(), 0x100);
        q.set_size(0x80);
        assert_eq!(q.size(), 0x80);
        assert_eq!(q.max_size(), 0x100);
        q.set_next_avail(5);
        assert_eq!(q.next_avail(), 5);
        q.set_next_used(3);
        assert_eq!(q.next_used(), 3);
        assert_eq!(
            q.avail_idx(m.memory(), Ordering::Acquire).unwrap(),
            Wrapping(0)
        );
        assert_eq!(
            q.used_idx(m.memory(), Ordering::Acquire).unwrap(),
            Wrapping(0)
        );

        assert_eq!(q.next_used(), 3);

        // index too large
        assert!(q.add_used(m.memory(), 0x200, 0x1000).is_err());
        assert_eq!(q.next_used(), 3);

        // should be ok
        q.add_used(m.memory(), 1, 0x1000).unwrap();
        assert_eq!(q.next_used(), 4);
        assert_eq!(
            q.used_idx(m.memory(), Ordering::Acquire).unwrap(),
            Wrapping(4)
        );
    }
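
    // A minimal additional sketch (not part of the original test suite): it
    // shows a cloned `QueueSync` publishing a used descriptor from a worker
    // thread, with the update visible to the parent once the thread is joined.
    // It assumes `std::thread::scope` (stable since Rust 1.63) is acceptable
    // for the crate's MSRV.
    #[test]
    fn test_sync_add_used_across_threads() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut q = QueueSync::new(0x100).unwrap();

        q.set_desc_table_address(Some(0x1000), None);
        q.set_avail_ring_address(Some(0x2000), None);
        q.set_used_ring_address(Some(0x3000), None);
        q.set_ready(true);
        assert!(q.is_valid(m.memory()));

        let mut q2 = q.clone();
        std::thread::scope(|s| {
            s.spawn(|| {
                // The clone shares the same `Arc<Mutex<Queue>>`, so this update
                // is synchronized with accesses from the parent thread.
                q2.add_used(m.memory(), 1, 0x100).unwrap();
            });
        });

        // The scope joins the worker, so its update is observable here.
        assert_eq!(q.next_used(), 1);
        assert_eq!(
            q.used_idx(m.memory(), Ordering::Acquire).unwrap(),
            Wrapping(1)
        );
    }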

    #[test]
    fn test_sync_state_reset_queue() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut q = QueueSync::new(0x100).unwrap();

        q.set_desc_table_address(Some(0x1000), None);
        q.set_avail_ring_address(Some(0x2000), None);
        q.set_used_ring_address(Some(0x3000), None);
        q.set_event_idx(true);
        q.set_next_avail(2);
        q.set_next_used(2);
        q.set_size(0x8);
        q.set_ready(true);
        assert!(q.is_valid(m.memory()));

        q.needs_notification(m.memory()).unwrap();

        assert_eq!(q.lock_state().size(), 0x8);
        assert!(q.lock_state().ready());
        assert_ne!(q.lock_state().desc_table(), DEFAULT_DESC_TABLE_ADDR);
        assert_ne!(q.lock_state().avail_ring(), DEFAULT_AVAIL_RING_ADDR);
        assert_ne!(q.lock_state().used_ring(), DEFAULT_USED_RING_ADDR);
        assert_ne!(q.lock_state().next_avail(), 0);
        assert_ne!(q.lock_state().next_used(), 0);
        assert!(q.lock_state().event_idx_enabled());

        q.reset();
        assert_eq!(q.lock_state().size(), 0x100);
        assert!(!q.lock_state().ready());
        assert_eq!(q.lock_state().desc_table(), DEFAULT_DESC_TABLE_ADDR);
        assert_eq!(q.lock_state().avail_ring(), DEFAULT_AVAIL_RING_ADDR);
        assert_eq!(q.lock_state().used_ring(), DEFAULT_USED_RING_ADDR);
        assert_eq!(q.lock_state().next_avail(), 0);
        assert_eq!(q.lock_state().next_used(), 0);
        assert!(!q.lock_state().event_idx_enabled());
    }

    #[test]
    fn test_enable_disable_notification() {
        let m = &GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mem = m.memory();
        let mut q = QueueSync::new(0x100).unwrap();

        q.set_desc_table_address(Some(0x1000), None);
        assert_eq!(q.desc_table(), 0x1000);
        q.set_avail_ring_address(Some(0x2000), None);
        assert_eq!(q.avail_ring(), 0x2000);
        q.set_used_ring_address(Some(0x3000), None);
        assert_eq!(q.used_ring(), 0x3000);
        q.set_ready(true);
        assert!(q.is_valid(mem));

        let used_addr = GuestAddress(q.lock_state().used_ring());

        assert!(!q.event_idx_enabled());
        q.enable_notification(mem).unwrap();
        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.disable_notification(m.memory()).unwrap();
        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, VRING_USED_F_NO_NOTIFY as u16);

        q.enable_notification(mem).unwrap();
        let v = m.read_obj::<u16>(used_addr).map(u16::from_le).unwrap();
        assert_eq!(v, 0);

        q.set_event_idx(true);
        let avail_addr = GuestAddress(q.lock_state().avail_ring());
        m.write_obj::<u16>(u16::to_le(2), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.lock_state().set_next_avail(2);
        assert!(!q.enable_notification(mem).unwrap());

        m.write_obj::<u16>(u16::to_le(8), avail_addr.unchecked_add(2))
            .unwrap();

        assert!(q.enable_notification(mem).unwrap());
        q.lock_state().set_next_avail(8);
        assert!(!q.enable_notification(mem).unwrap());
    }
}