use alloc::vec;

use super::net_buf::{RxBuffer, TxBuffer};
use super::{EthernetAddress, VirtIONetRaw};
use crate::{hal::Hal, transport::Transport, Error, Result};

/// Driver for a VirtIO network device.
///
/// Unlike [`VirtIONetRaw`], it uses [`RxBuffer`]s and [`TxBuffer`]s rather
/// than raw slices for reception and transmission. On initialization, it
/// pre-allocates all receive buffers and adds them all to the receive queue.
///
/// The virtio network device is a virtual Ethernet card.
///
/// It has evolved rapidly and clearly demonstrates how support for new
/// features is added to an existing device.
/// Empty buffers are placed in one virtqueue for receiving packets, and
/// outgoing packets are enqueued into another for transmission in that order.
/// A third command queue is used to control advanced filtering features.
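///
/// # Example
///
/// A minimal usage sketch, assuming a hypothetical `HalImpl: Hal`
/// implementation and an already-probed `transport` (neither is provided by
/// this module):
///
/// ```ignore
/// // 16-entry virtqueues, 2 KiB receive buffers.
/// let mut net = VirtIONet::<HalImpl, _, 16>::new(transport, 2048)?;
/// // e.g. hand the MAC address to the network stack.
/// let mac = net.mac_address();
/// ```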
pub struct VirtIONet<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    inner: VirtIONetRaw<H, T, QUEUE_SIZE>,
    rx_buffers: [Option<RxBuffer>; QUEUE_SIZE],
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONet<H, T, QUEUE_SIZE> {
    /// Creates a new VirtIO network driver, pre-allocating `QUEUE_SIZE`
    /// receive buffers of `buf_len` bytes each and adding them to the
    /// receive queue.
    pub fn new(transport: T, buf_len: usize) -> Result<Self> {
        let mut inner = VirtIONetRaw::new(transport)?;

        // `Option<RxBuffer>` is not `Copy`, so use a `const` item to
        // initialize the array of empty receive-buffer slots.
        const NONE_BUF: Option<RxBuffer> = None;
        let mut rx_buffers = [NONE_BUF; QUEUE_SIZE];
        for (i, rx_buf_place) in rx_buffers.iter_mut().enumerate() {
            let mut rx_buf = RxBuffer::new(i, buf_len);
            // Safe because the buffer lives as long as the queue.
            let token = unsafe { inner.receive_begin(rx_buf.as_bytes_mut())? };
            assert_eq!(token, i as u16);
            *rx_buf_place = Some(rx_buf);
        }

        Ok(VirtIONet { inner, rx_buffers })
    }

    /// Acknowledge interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.inner.ack_interrupt()
    }

    /// Disable interrupts.
    pub fn disable_interrupts(&mut self) {
        self.inner.disable_interrupts()
    }

    /// Enable interrupts.
    pub fn enable_interrupts(&mut self) {
        self.inner.enable_interrupts()
    }

    /// Get MAC address.
    pub fn mac_address(&self) -> EthernetAddress {
        self.inner.mac_address()
    }

    /// Returns whether a packet can currently be sent.
    pub fn can_send(&self) -> bool {
        self.inner.can_send()
    }

    /// Returns whether a packet is ready to be received.
    pub fn can_recv(&self) -> bool {
        self.inner.poll_receive().is_some()
    }

    /// Receives an [`RxBuffer`] from the network. If no packet is currently
    /// available, returns [`Error::NotReady`].
    ///
    /// This pops a buffer for which the NIC has completed data reception from
    /// the receive queue.
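    ///
    /// # Example
    ///
    /// A polling sketch (`net` and `process` are hypothetical, and
    /// `RxBuffer::packet` is assumed to expose the received payload):
    ///
    /// ```ignore
    /// match net.receive() {
    ///     Ok(rx_buf) => {
    ///         // Handle the packet, then return the buffer to the receive queue.
    ///         process(rx_buf.packet());
    ///         net.recycle_rx_buffer(rx_buf)?;
    ///     }
    ///     Err(Error::NotReady) => {} // nothing pending; poll again later
    ///     Err(e) => return Err(e),
    /// }
    /// ```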
    pub fn receive(&mut self) -> Result<RxBuffer> {
        if let Some(token) = self.inner.poll_receive() {
            let mut rx_buf = self.rx_buffers[token as usize]
                .take()
                .ok_or(Error::WrongToken)?;
            if token != rx_buf.idx {
                return Err(Error::WrongToken);
            }

            // Safe because `token` == `rx_buf.idx`, so we are passing the same
            // buffer that we passed to `VirtQueue::add` and it is still valid.
            let (_hdr_len, pkt_len) =
                unsafe { self.inner.receive_complete(token, rx_buf.as_bytes_mut())? };
            rx_buf.set_packet_len(pkt_len);
            Ok(rx_buf)
        } else {
            Err(Error::NotReady)
        }
    }

    /// Gives ownership of `rx_buf` back to the driver and recycles it for the
    /// next reception.
    ///
    /// This adds the buffer back to the NIC's receive queue.
    pub fn recycle_rx_buffer(&mut self, mut rx_buf: RxBuffer) -> Result {
        // Safe because ownership of `rx_buf` is moved back into `rx_buffers`,
        // so the buffer lives as long as the queue.
        let new_token = unsafe { self.inner.receive_begin(rx_buf.as_bytes_mut()) }?;
        // `rx_buffers[new_token]` is expected to be `None`, since it was taken
        // out in `Self::receive()` and has not been put back yet.
        if self.rx_buffers[new_token as usize].is_some() {
            return Err(Error::WrongToken);
        }
        rx_buf.idx = new_token;
        self.rx_buffers[new_token as usize] = Some(rx_buf);
        Ok(())
    }

    /// Allocates a new, zero-initialized [`TxBuffer`] of `buf_len` bytes for
    /// transmission.
    pub fn new_tx_buffer(&self, buf_len: usize) -> TxBuffer {
        TxBuffer(vec![0; buf_len])
    }

    /// Sends a [`TxBuffer`] to the network, blocking until the request has
    /// completed.
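    ///
    /// # Example
    ///
    /// A transmit sketch (`net` and `frame` are hypothetical, and
    /// `TxBuffer::packet_mut` is assumed to expose the writable packet bytes):
    ///
    /// ```ignore
    /// let frame: &[u8] = &[0u8; 60]; // placeholder Ethernet frame
    /// let mut tx_buf = net.new_tx_buffer(frame.len());
    /// tx_buf.packet_mut().copy_from_slice(frame);
    /// net.send(tx_buf)?; // blocks until the device has used the buffer
    /// ```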
    pub fn send(&mut self, tx_buf: TxBuffer) -> Result {
        self.inner.send(tx_buf.packet())
    }
}