// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause

use std::{io::Write, num::Wrapping};

use vm_memory::{bitmap::BitmapSlice, VolatileSlice};

use crate::vhu_vsock::{Error, Result};

#[derive(Debug)]
pub(crate) struct LocalTxBuf {
    /// Buffer holding data to be forwarded to a host-side application
    buf: Vec<u8>,
    /// Index into the buffer from which data can be consumed
    head: Wrapping<u32>,
    /// Index into the buffer at which new data can be added
    tail: Wrapping<u32>,
}

impl LocalTxBuf {
    /// Create a new instance of LocalTxBuf.
    pub fn new(buf_size: u32) -> Self {
        Self {
            buf: vec![0; buf_size as usize],
            head: Wrapping(0),
            tail: Wrapping(0),
        }
    }

    /// Get the buffer size.
    pub fn get_buf_size(&self) -> u32 {
        self.buf.len() as u32
    }

    /// Check if the buf is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Add new data to the tx buffer, push all or none.
    /// Returns a LocalTxBufFull error if there is not enough free space.
    pub fn push<B: BitmapSlice>(&mut self, data_buf: &VolatileSlice<B>) -> Result<()> {
        if self.get_buf_size() as usize - self.len() < data_buf.len() {
            // Tx buffer is full
            return Err(Error::LocalTxBufFull);
        }

        // Get index into buffer at which data can be inserted
        let tail_idx = self.tail.0 as usize % self.get_buf_size() as usize;

        // Check how much of the data fits between the tail index and the end of the buffer
        let len = std::cmp::min(self.get_buf_size() as usize - tail_idx, data_buf.len());
        let txbuf = &mut self.buf[tail_idx..tail_idx + len];
        data_buf.copy_to(txbuf);

        // Wrap the remaining data around to the front of the buffer, continuing
        // from offset `len` of the source slice
        if len < data_buf.len() {
            let remain_txbuf = &mut self.buf[..(data_buf.len() - len)];
            data_buf
                .subslice(len, data_buf.len() - len)
                .expect("subslice is within data_buf bounds")
                .copy_to(remain_txbuf);
        }

        // Increment tail by the amount of data that has been added to the buffer
        self.tail += Wrapping(data_buf.len() as u32);

        Ok(())
    }

    /// Flush buf data to stream.
    pub fn flush_to<S: Write>(&mut self, stream: &mut S) -> Result<usize> {
        if self.is_empty() {
            // No data to be flushed
            return Ok(0);
        }

        // Get index into buffer from which data can be read
        let head_idx = self.head.0 as usize % self.get_buf_size() as usize;

        // First write from head to end of buffer
        let len = std::cmp::min(self.get_buf_size() as usize - head_idx, self.len());
        let written = stream
            .write(&self.buf[head_idx..(head_idx + len)])
            .map_err(Error::LocalTxBufFlush)?;

        // Increment head by the amount of data that has been flushed to the stream
        self.head += Wrapping(written as u32);

        // The stream accepted only part of the data; the caller can try again later
        if written < len {
            return Ok(written);
        }

        // Everything up to the end of the buffer was written; recurse to flush any
        // data that wrapped around to the front (returns 0 if the buffer is now empty)
        Ok(written + self.flush_to(stream).unwrap_or(0))
    }

    /// Return the amount of data in the buffer.
    fn len(&self) -> usize {
        (self.tail - self.head).0 as usize
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    const CONN_TX_BUF_SIZE: u32 = 64 * 1024;

    #[test]
    fn test_txbuf_len() {
        let mut loc_tx_buf = LocalTxBuf::new(CONN_TX_BUF_SIZE);

        // Zero length tx buf
        assert_eq!(loc_tx_buf.len(), 0);

        // finite length tx buf
        loc_tx_buf.head = Wrapping(0);
        loc_tx_buf.tail = Wrapping(CONN_TX_BUF_SIZE);
        assert_eq!(loc_tx_buf.len(), CONN_TX_BUF_SIZE as usize);

        loc_tx_buf.tail = Wrapping(CONN_TX_BUF_SIZE / 2);
        assert_eq!(loc_tx_buf.len(), (CONN_TX_BUF_SIZE / 2) as usize);

        loc_tx_buf.head = Wrapping(256);
        assert_eq!(loc_tx_buf.len(), 32512);
    }

    #[test]
    fn test_txbuf_is_empty() {
        let mut loc_tx_buf = LocalTxBuf::new(CONN_TX_BUF_SIZE);

        // empty tx buffer
        assert!(loc_tx_buf.is_empty());

        // non-empty tx buffer
        loc_tx_buf.tail = Wrapping(CONN_TX_BUF_SIZE);
        assert!(!loc_tx_buf.is_empty());
    }

    #[test]
    fn test_txbuf_push() {
        let mut loc_tx_buf = LocalTxBuf::new(CONN_TX_BUF_SIZE);
        let mut buf = [0; CONN_TX_BUF_SIZE as usize];
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(buf.as_mut_ptr(), buf.len()) };

        // push data into empty tx buffer
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_ok());
        assert_eq!(loc_tx_buf.head, Wrapping(0));
        assert_eq!(loc_tx_buf.tail, Wrapping(CONN_TX_BUF_SIZE));

        // push data into full tx buffer
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_err());

        // head and tail wrap at full
        loc_tx_buf.head = Wrapping(CONN_TX_BUF_SIZE);
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_ok());
        assert_eq!(loc_tx_buf.tail, Wrapping(CONN_TX_BUF_SIZE * 2));

        // only tail wraps at full
        let mut buf = vec![1; 4];
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(buf.as_mut_ptr(), buf.len()) };
        let mut cmp_data = vec![1; 4];
        cmp_data.append(&mut vec![0; (CONN_TX_BUF_SIZE - 4) as usize]);
        loc_tx_buf.head = Wrapping(4);
        loc_tx_buf.tail = Wrapping(CONN_TX_BUF_SIZE);
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_ok());
        assert_eq!(loc_tx_buf.head, Wrapping(4));
        assert_eq!(loc_tx_buf.tail, Wrapping(CONN_TX_BUF_SIZE + 4));
        assert_eq!(loc_tx_buf.buf, cmp_data);
    }

    #[test]
    fn test_txbuf_flush_to() {
        let mut loc_tx_buf = LocalTxBuf::new(CONN_TX_BUF_SIZE);

        // data to be flushed
        let mut buf = vec![1; CONN_TX_BUF_SIZE as usize];
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(buf.as_mut_ptr(), buf.len()) };

        // target to which data is flushed
        let mut cmp_vec = Vec::with_capacity(data.len());

        // flush no data
        let res_flush = loc_tx_buf.flush_to(&mut cmp_vec);
        assert!(res_flush.is_ok());
        assert_eq!(res_flush.unwrap(), 0);

        // flush data of CONN_TX_BUF_SIZE amount
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_ok());
        let res_flush = loc_tx_buf.flush_to(&mut cmp_vec);
        if let Ok(n) = res_flush {
            assert_eq!(loc_tx_buf.head, Wrapping(n as u32));
            assert_eq!(loc_tx_buf.tail, Wrapping(CONN_TX_BUF_SIZE));
            assert_eq!(n, cmp_vec.len());
            assert_eq!(cmp_vec, buf[..n]);
        }

        // wrapping head flush
        let mut buf = vec![0; (CONN_TX_BUF_SIZE / 2) as usize];
        buf.append(&mut vec![1; (CONN_TX_BUF_SIZE / 2) as usize]);
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(buf.as_mut_ptr(), buf.len()) };

        loc_tx_buf.head = Wrapping(0);
        loc_tx_buf.tail = Wrapping(0);
        let res_push = loc_tx_buf.push(&data);
        assert!(res_push.is_ok());
        cmp_vec.clear();
        loc_tx_buf.head = Wrapping(CONN_TX_BUF_SIZE / 2);
        loc_tx_buf.tail = Wrapping(CONN_TX_BUF_SIZE + (CONN_TX_BUF_SIZE / 2));
        let res_flush = loc_tx_buf.flush_to(&mut cmp_vec);
        if let Ok(n) = res_flush {
            assert_eq!(
                loc_tx_buf.head,
                Wrapping(CONN_TX_BUF_SIZE + (CONN_TX_BUF_SIZE / 2))
            );
            assert_eq!(
                loc_tx_buf.tail,
                Wrapping(CONN_TX_BUF_SIZE + (CONN_TX_BUF_SIZE / 2))
            );
            assert_eq!(n, cmp_vec.len());
            let mut data = vec![1; (CONN_TX_BUF_SIZE / 2) as usize];
            data.append(&mut vec![0; (CONN_TX_BUF_SIZE / 2) as usize]);
            assert_eq!(cmp_vec, data[..n]);
        }
    }
}
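
// Supplementary illustrative tests: a minimal sketch, not part of the original suite.
// The module name, the `ShortWriter` helper, and the small `BUF_SIZE` value are
// hypothetical choices. They exercise two paths the tests above do not cover with
// distinguishable data: a `push` that wraps around the end of the ring (assuming the
// wrapped copy continues from offset `len` of the source slice, as in `push` above),
// and a `flush_to` against a writer that accepts only part of the buffered data.
#[cfg(test)]
mod extra_txbuf_tests {
    use super::*;

    /// Hypothetical writer that accepts at most `limit` bytes per `write` call.
    struct ShortWriter {
        data: Vec<u8>,
        limit: usize,
    }

    impl Write for ShortWriter {
        fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
            let n = std::cmp::min(self.limit, buf.len());
            self.data.extend_from_slice(&buf[..n]);
            Ok(n)
        }

        fn flush(&mut self) -> std::io::Result<()> {
            Ok(())
        }
    }

    #[test]
    fn test_txbuf_push_wraps_remaining_bytes() {
        const BUF_SIZE: u32 = 8;
        let mut loc_tx_buf = LocalTxBuf::new(BUF_SIZE);

        // Start two bytes before the end of the ring so a 4-byte push wraps around.
        loc_tx_buf.head = Wrapping(6);
        loc_tx_buf.tail = Wrapping(6);

        let mut src = [1u8, 2, 3, 4];
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(src.as_mut_ptr(), src.len()) };
        assert!(loc_tx_buf.push(&data).is_ok());

        // Bytes 1 and 2 land at the end of the ring, bytes 3 and 4 wrap to the front.
        assert_eq!(loc_tx_buf.buf, vec![3u8, 4, 0, 0, 0, 0, 1, 2]);
        assert_eq!(loc_tx_buf.tail, Wrapping(10));
    }

    #[test]
    fn test_txbuf_flush_to_partial_write() {
        const BUF_SIZE: u32 = 8;
        let mut loc_tx_buf = LocalTxBuf::new(BUF_SIZE);

        let mut src = [1u8, 2, 3, 4, 5, 6];
        // SAFETY: Safe as the buffer is guaranteed to be valid here.
        let data = unsafe { VolatileSlice::new(src.as_mut_ptr(), src.len()) };
        assert!(loc_tx_buf.push(&data).is_ok());

        // The writer takes only 4 of the 6 buffered bytes, so flush_to reports a
        // partial flush and head advances only by the amount actually written.
        let mut writer = ShortWriter {
            data: Vec::new(),
            limit: 4,
        };
        let written = loc_tx_buf.flush_to(&mut writer).unwrap();
        assert_eq!(written, 4);
        assert_eq!(loc_tx_buf.head, Wrapping(4));
        assert_eq!(loc_tx_buf.len(), 2);
        assert_eq!(writer.data, vec![1u8, 2, 3, 4]);
    }
}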