// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::io;
use std::io::Read;
use std::io::Write;
use std::result;
use std::sync::Arc;
use std::sync::MutexGuard;

use base::error;
use base::named_pipes::OverlappedWrapper;
use base::warn;
use base::Event;
use base::ReadNotifier;
use base::WaitContext;
use libc::EEXIST;
use net_util::TapT;
use sync::Mutex;
use virtio_sys::virtio_net;
use vm_memory::GuestMemory;

use super::super::super::base_features;
use super::super::super::net::Net;
use super::super::super::net::NetError;
use super::super::super::net::Token;
use super::super::super::net::Worker;
use super::super::super::net::MAX_BUFFER_SIZE;
use super::super::super::Interrupt;
use super::super::super::ProtectionType;
use super::super::super::Queue;
use super::super::super::Reader;

// This file should not be included at the virtio mod level if slirp is not included. In case it
// is, emit a user-friendly error message.
#[cfg(not(feature = "slirp"))]
compile_error!("Net device without slirp not supported on windows");

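/// Validates and configures the tap device. This is a no-op for the slirp backend on Windows.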
pub fn validate_and_configure_tap<T: TapT>(_tap: &T, _vq_pairs: u16) -> Result<(), NetError> {
    // No-op for slirp on windows
    Ok(())
}

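/// Maps negotiated virtio-net features to tap offload flags. Always returns 0 because slirp
/// does not support offloads.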
pub fn virtio_features_to_tap_offload(_features: u64) -> u32 {
    // slirp does not support offloads
    0
}

// Copies a single frame from `rx_buf` into the guest. Returns true
// if a buffer was used, and false if the frame must be deferred until a buffer
// is made available by the driver.
fn rx_single_frame(rx_queue: &mut Queue, rx_buf: &mut [u8], rx_count: usize) -> bool {
    let mut desc_chain = match rx_queue.pop() {
        Some(desc) => desc,
        None => return false,
    };

    match desc_chain.writer.write_all(&rx_buf[0..rx_count]) {
        Ok(()) => (),
        Err(ref e) if e.kind() == io::ErrorKind::WriteZero => {
            warn!(
                "net: rx: buffer is too small to hold frame of size {}",
                rx_count
            );
        }
        Err(e) => {
            warn!("net: rx: failed to write slice: {}", e);
        }
    };

    let bytes_written = desc_chain.writer.bytes_written() as u32;

    rx_queue.add_used(desc_chain, bytes_written);

    true
}

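/// Copies frames from the tap into available rx queue buffers until the tap runs out of data
/// or the driver runs out of buffers (in which case the current frame is deferred). Returns
/// true if the caller still needs to signal the rx queue interrupt.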
pub fn process_rx<T: TapT>(
    rx_queue: &mut Queue,
    tap: &mut T,
    rx_buf: &mut [u8],
    deferred_rx: &mut bool,
    rx_count: &mut usize,
    overlapped_wrapper: &mut OverlappedWrapper,
) -> bool {
    let mut needs_interrupt = false;
    let mut first_frame = true;

    // Read as many frames as possible.
    loop {
        let res = if *deferred_rx {
            // The existing buffer still needs to be sent to the rx queue.
            Ok(*rx_count)
        } else {
            tap.try_read_result(overlapped_wrapper)
        };
        match res {
            Ok(count) => {
                *rx_count = count;
                if !rx_single_frame(rx_queue, rx_buf, *rx_count) {
                    *deferred_rx = true;
                    break;
                } else if first_frame {
                    rx_queue.trigger_interrupt();
                    first_frame = false;
                } else {
                    needs_interrupt = true;
                }

                // SAFETY: safe because rx_buf & overlapped_wrapper live until
                // the overlapped operation completes and are not used in any
                // other operations until that time.
                match unsafe { tap.read_overlapped(rx_buf, overlapped_wrapper) } {
                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => {
                        warn!("net: rx: read_overlapped failed: {}", e);
                        break;
                    }
                    Err(e) => {
                        panic!("read_overlapped failed: {}", e);
                    }
                    _ => {}
                }

                // We were able to dispatch a frame to the guest, so we can resume normal RX
                // service.
                *deferred_rx = false;
            }
            Err(e) => {
                // `try_read_result()` shouldn't return any error other than
                // `ERROR_IO_INCOMPLETE`. If it does, we need to retry the
                // overlapped operation.
                if e.kind() != std::io::ErrorKind::WouldBlock {
                    warn!("net: rx: failed to read tap: {}", e);
                    // SAFETY: safe because rx_buf & overlapped_wrapper live until
                    // the overlapped operation completes and are not used in any
                    // other operations until that time.
                    match unsafe { tap.read_overlapped(rx_buf, overlapped_wrapper) } {
                        Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => {
                            warn!("net: rx: read_overlapped failed: {}", e);
                            break;
                        }
                        Err(e) => {
                            panic!("read_overlapped failed: {}", e);
                        }
                        _ => {}
                    }
                }
                break;
            }
        }
    }

    needs_interrupt
}

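/// Drains the tx queue, copying each frame into a contiguous buffer and writing it to the
/// tap, then signals the tx queue interrupt.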
pub fn process_tx<T: TapT>(tx_queue: &mut Queue, tap: &mut T) {
    // Reads until `buf` is full or there is no more data in `r`, whichever
    // comes first.
    fn read_to_end(r: &mut Reader, buf: &mut [u8]) -> io::Result<usize> {
        let mut count = 0;
        while count < buf.len() {
            match r.read(&mut buf[count..]) {
                Ok(0) => break,
                Ok(n) => count += n,
                Err(e) => return Err(e),
            }
        }

        Ok(count)
    }

    while let Some(mut desc_chain) = tx_queue.pop() {
        let mut frame = [0u8; MAX_BUFFER_SIZE];
        match read_to_end(&mut desc_chain.reader, &mut frame[..]) {
            Ok(len) => {
                // We need to copy the frame into a contiguous buffer before writing it to
                // slirp because the tap requires the frame to be written in a single write.
                if let Err(err) = tap.write_all(&frame[..len]) {
                    error!("net: tx: failed to write to tap: {}", err);
                }
            }
            Err(e) => error!("net: tx: failed to read frame into buffer: {}", e),
        }

        tx_queue.add_used(desc_chain, 0);
    }

    tx_queue.trigger_interrupt();
}

impl<T> Worker<T>
where
    T: TapT + ReadNotifier,
{
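    /// Processes incoming frames from the slirp-backed tap into this worker's rx queue.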
    pub(super) fn process_rx_slirp(&mut self) -> bool {
        process_rx(
            &mut self.rx_queue,
            &mut self.tap,
            &mut self.rx_buf,
            &mut self.deferred_rx,
            &mut self.rx_count,
            &mut self.overlapped_wrapper,
        )
    }

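    /// Handles a tap readable event: delivers any deferred frame first (removing the tap
    /// from the wait context if the guest still has no rx buffers), then processes newly
    /// received frames and signals the rx queue if needed.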
    pub(in crate::virtio) fn handle_rx_token(
        &mut self,
        wait_ctx: &WaitContext<Token>,
    ) -> result::Result<(), NetError> {
        let mut needs_interrupt = false;
        // Process a deferred frame first if available. Don't read from tap again
        // until we manage to receive this deferred frame.
        if self.deferred_rx {
            if rx_single_frame(&mut self.rx_queue, &mut self.rx_buf, self.rx_count) {
                self.deferred_rx = false;
                needs_interrupt = true;
            } else {
                // There is an outstanding deferred frame and the guest has not yet
                // made any buffers available. Remove the tapfd from the poll
                // context until more are made available.
                wait_ctx
                    .delete(&self.tap)
                    .map_err(NetError::EventRemoveTap)?;
                return Ok(());
            }
        }
        needs_interrupt |= self.process_rx_slirp();
        if needs_interrupt {
            self.interrupt.signal_used_queue(self.rx_queue.vector());
        }
        Ok(())
    }

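    /// Handles the guest making rx buffers available: if a deferred frame can now be
    /// delivered, delivers it, re-adds the tap to the wait context, and signals the rx queue.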
    pub(in crate::virtio) fn handle_rx_queue(
        &mut self,
        wait_ctx: &WaitContext<Token>,
        _tap_polling_enabled: bool,
    ) -> result::Result<(), NetError> {
        // There should be a buffer available now to receive the frame into.
        if self.deferred_rx && rx_single_frame(&mut self.rx_queue, &mut self.rx_buf, self.rx_count)
        {
            // The guest has made buffers available, so add the tap back to the
            // poll context in case it was removed.
            match wait_ctx.add(&self.tap, Token::RxTap) {
                Ok(_) => {}
                Err(e) if e.errno() == EEXIST => {}
                Err(e) => {
                    return Err(NetError::EventAddTap(e));
                }
            }
            self.deferred_rx = false;
            self.interrupt.signal_used_queue(self.rx_queue.vector());
        }
        Ok(())
    }
}