xref: /aosp_15_r20/external/crosvm/devices/src/usb/xhci/event_ring.rs (revision bb4ee6a4ae7042d18b07a98463b9c8b875e44b39)
1 // Copyright 2018 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use std::mem::size_of;
6 use std::sync::atomic::fence;
7 use std::sync::atomic::Ordering;
8 
9 use remain::sorted;
10 use thiserror::Error;
11 use vm_memory::GuestAddress;
12 use vm_memory::GuestMemory;
13 use vm_memory::GuestMemoryError;
14 use zerocopy::AsBytes;
15 
16 use super::xhci_abi::EventRingSegmentTableEntry;
17 use super::xhci_abi::Trb;
18 
19 #[sorted]
20 #[derive(Error, Debug)]
21 pub enum Error {
22     #[error("event ring has a bad enqueue pointer: {0}")]
23     BadEnqueuePointer(GuestAddress),
24     #[error("event ring has a bad seg table addr: {0}")]
25     BadSegTableAddress(GuestAddress),
26     #[error("event ring has a bad seg table index: {0}")]
27     BadSegTableIndex(u16),
28     #[error("event ring is full")]
29     EventRingFull,
30     #[error("event ring cannot read from guest memory: {0}")]
31     MemoryRead(GuestMemoryError),
32     #[error("event ring cannot write to guest memory: {0}")]
33     MemoryWrite(GuestMemoryError),
34     #[error("event ring is uninitialized")]
35     Uninitialized,
36 }
37 
38 type Result<T> = std::result::Result<T, Error>;
39 
40 /// Event rings are segmented circular buffers used to pass event TRBs from the xHCI device back to
41 /// the guest.  Each event ring is associated with a single interrupter.  See section 4.9.4 of the
42 /// xHCI specification for more details.
43 /// This implementation is only for the primary interrupter. Please review the xHCI spec before
44 /// using it for a secondary interrupter.
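///
/// A rough usage sketch (the values below are illustrative; in practice the guest programs them
/// through the interrupter's ERSTSZ, ERSTBA and ERDP registers):
///
/// ```ignore
/// let mut ring = EventRing::new(mem);
/// ring.set_seg_table_size(1)?;
/// ring.set_seg_table_base_addr(GuestAddress(0x1000))?;
/// ring.set_dequeue_pointer(GuestAddress(0x2000));
/// ring.add_event(trb)?;
/// ```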
45 pub struct EventRing {
46     mem: GuestMemory,
47     segment_table_size: u16,
48     segment_table_base_address: GuestAddress,
49     current_segment_index: u16,
50     trb_count: u16,
51     enqueue_pointer: GuestAddress,
52     dequeue_pointer: GuestAddress,
53     producer_cycle_state: bool,
54 }
55 
56 impl EventRing {
57     /// Create an empty, uninitialized event ring.
58     pub fn new(mem: GuestMemory) -> Self {
59         EventRing {
60             mem,
61             segment_table_size: 0,
62             segment_table_base_address: GuestAddress(0),
63             current_segment_index: 0,
64             enqueue_pointer: GuestAddress(0),
65             dequeue_pointer: GuestAddress(0),
66             trb_count: 0,
67             // As specified in xHCI spec 4.9.4, cycle state should be initialized to 1.
68             producer_cycle_state: true,
69         }
70     }
71 
72     /// This function implements the left side of Figure 4-12 in the xHCI spec.
73     pub fn add_event(&mut self, mut trb: Trb) -> Result<()> {
74         self.check_inited()?;
75         if self.is_full()? {
76             return Err(Error::EventRingFull);
77         }
78         // The event is written in two steps to avoid a race condition.
79         // The guest kernel uses the cycle bit to check ownership, so write the cycle bit last.
80         trb.set_cycle(!self.producer_cycle_state);
81         self.mem
82             .write_obj_at_addr(trb, self.enqueue_pointer)
83             .map_err(Error::MemoryWrite)?;
84 
85         // Updating the cycle state bit should always happen after updating the other parts.
86         fence(Ordering::SeqCst);
87 
88         trb.set_cycle(self.producer_cycle_state);
89 
90         // Byte offset of the last dword of the TRB, which contains the cycle bit.
91         const CYCLE_STATE_OFFSET: usize = 12usize;
92         let data = trb.as_bytes();
93         // A TRB contains 4 dwords; the last one contains the cycle bit.
94         let cycle_bit_dword = &data[CYCLE_STATE_OFFSET..];
95         let address = self.enqueue_pointer;
96         let address = address
97             .checked_add(CYCLE_STATE_OFFSET as u64)
98             .ok_or(Error::BadEnqueuePointer(self.enqueue_pointer))?;
99         self.mem
100             .write_all_at_addr(cycle_bit_dword, address)
101             .map_err(Error::MemoryWrite)?;
102 
103         xhci_trace!(
104             "event write to pointer {:#x}, trb_count {}, {}",
105             self.enqueue_pointer.0,
106             self.trb_count,
107             trb
108         );
109         self.enqueue_pointer = match self.enqueue_pointer.checked_add(size_of::<Trb>() as u64) {
110             Some(addr) => addr,
111             None => return Err(Error::BadEnqueuePointer(self.enqueue_pointer)),
112         };
113         self.trb_count -= 1;
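        // If the current segment is now exhausted, advance to the next segment table entry;
        // wrapping past the last entry toggles the producer cycle state (xHCI spec 4.9.4).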
114         if self.trb_count == 0 {
115             self.current_segment_index += 1;
116             if self.current_segment_index == self.segment_table_size {
117                 self.producer_cycle_state ^= true;
118                 self.current_segment_index = 0;
119             }
120             self.load_current_seg_table_entry()?;
121         }
122         Ok(())
123     }
124 
125     /// Set segment table size.
126     pub fn set_seg_table_size(&mut self, size: u16) -> Result<()> {
127         xhci_trace!("set_seg_table_size({:#x})", size);
128         self.segment_table_size = size;
129         self.try_reconfigure_event_ring()
130     }
131 
132     /// Set segment table base addr.
133     pub fn set_seg_table_base_addr(&mut self, addr: GuestAddress) -> Result<()> {
134         xhci_trace!("set_seg_table_base_addr({:#x})", addr.0);
135         self.segment_table_base_address = addr;
136         self.try_reconfigure_event_ring()
137     }
138 
139     /// Set dequeue pointer.
140     pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
141         xhci_trace!("set_dequeue_pointer({:#x})", addr.0);
142         self.dequeue_pointer = addr;
143     }
144 
145     /// Check if event ring is empty.
146     pub fn is_empty(&self) -> bool {
147         self.enqueue_pointer == self.dequeue_pointer
148     }
149 
150     /// The event ring is considered full when there is only space for one last TRB. In this
151     /// case, the xHC should write an Event Ring Full Error TRB and perform additional handling.
152     /// See the spec, figure 4-12, for more details.
153     /// For now, we just report that the ring is full and fail (as this is unlikely to happen).
154     pub fn is_full(&self) -> Result<bool> {
155         if self.trb_count == 1 {
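            // The enqueue pointer is on the last TRB of the current segment, so the ring is full
            // iff the dequeue pointer sits at the start of the next segment.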
156             // erst == event ring segment table
157             let next_erst_idx = (self.current_segment_index + 1) % self.segment_table_size;
158             let erst_entry = self.read_seg_table_entry(next_erst_idx)?;
159             Ok(self.dequeue_pointer.0 == erst_entry.get_ring_segment_base_address())
160         } else {
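            // Otherwise the ring is full iff the dequeue pointer is exactly one TRB past the
            // enqueue pointer.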
161             Ok(self.dequeue_pointer.0 == self.enqueue_pointer.0 + size_of::<Trb>() as u64)
162         }
163     }
164 
165     /// Try to configure the event ring. Does nothing until the seg table size and address are set.
166     fn try_reconfigure_event_ring(&mut self) -> Result<()> {
167         if self.segment_table_size == 0 || self.segment_table_base_address.0 == 0 {
168             return Ok(());
169         }
170         self.load_current_seg_table_entry()
171     }
172 
173     // Check if this event ring has been initialized.
174     fn check_inited(&self) -> Result<()> {
175         if self.segment_table_size == 0
176             || self.segment_table_base_address == GuestAddress(0)
177             || self.enqueue_pointer == GuestAddress(0)
178         {
179             return Err(Error::Uninitialized);
180         }
181         Ok(())
182     }
183 
184     // Load the segment table entry at the current segment index.
185     fn load_current_seg_table_entry(&mut self) -> Result<()> {
186         let entry = self.read_seg_table_entry(self.current_segment_index)?;
187         self.enqueue_pointer = GuestAddress(entry.get_ring_segment_base_address());
188         self.trb_count = entry.get_ring_segment_size();
189         Ok(())
190     }
191 
192     // Get seg table entry at index.
193     fn read_seg_table_entry(&self, index: u16) -> Result<EventRingSegmentTableEntry> {
194         let seg_table_addr = self.get_seg_table_addr(index)?;
195         // TODO(jkwang) We can refactor GuestMemory to allow in-place memory operation.
196         self.mem
197             .read_obj_from_addr(seg_table_addr)
198             .map_err(Error::MemoryRead)
199     }
200 
201     // Get seg table addr at index.
202     fn get_seg_table_addr(&self, index: u16) -> Result<GuestAddress> {
203         if index >= self.segment_table_size {
204             return Err(Error::BadSegTableIndex(index));
205         }
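        // Each entry is size_of::<EventRingSegmentTableEntry>() bytes; the entry address is the
        // table base plus index * entry size.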
206         self.segment_table_base_address
207             .checked_add(((size_of::<EventRingSegmentTableEntry>() as u16) * index) as u64)
208             .ok_or(Error::BadSegTableAddress(self.segment_table_base_address))
209     }
210 }
211 
212 #[cfg(test)]
213 mod test {
214     use std::mem::size_of;
215 
216     use base::pagesize;
217 
218     use super::*;
219 
220     #[test]
221     fn test_uninited() {
222         let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
223         let mut er = EventRing::new(gm);
224         let trb = Trb::new();
225         match er.add_event(trb).err().unwrap() {
226             Error::Uninitialized => {}
227             _ => panic!("unexpected error"),
228         }
229         assert_eq!(er.is_empty(), true);
230         assert_eq!(er.is_full().unwrap(), false);
231     }
232 
233     #[test]
234     fn test_event_ring() {
235         let trb_size = size_of::<Trb>() as u64;
236         let gm = GuestMemory::new(&[(GuestAddress(0), pagesize() as u64)]).unwrap();
237         let mut er = EventRing::new(gm.clone());
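        // Lay out a 3-entry segment table at 0x8, pointing at 3-TRB segments at 0x100, 0x200 and
        // 0x300.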
238         let mut st_entries = [EventRingSegmentTableEntry::new(); 3];
239         st_entries[0].set_ring_segment_base_address(0x100);
240         st_entries[0].set_ring_segment_size(3);
241         st_entries[1].set_ring_segment_base_address(0x200);
242         st_entries[1].set_ring_segment_size(3);
243         st_entries[2].set_ring_segment_base_address(0x300);
244         st_entries[2].set_ring_segment_size(3);
245         gm.write_obj_at_addr(st_entries[0], GuestAddress(0x8))
246             .unwrap();
247         gm.write_obj_at_addr(
248             st_entries[1],
249             GuestAddress(0x8 + size_of::<EventRingSegmentTableEntry>() as u64),
250         )
251         .unwrap();
252         gm.write_obj_at_addr(
253             st_entries[2],
254             GuestAddress(0x8 + 2 * size_of::<EventRingSegmentTableEntry>() as u64),
255         )
256         .unwrap();
257         // Init the event ring. This must be done after the segment table entries are written.
258         er.set_seg_table_size(3).unwrap();
259         er.set_seg_table_base_addr(GuestAddress(0x8)).unwrap();
260         er.set_dequeue_pointer(GuestAddress(0x100));
261 
262         let mut trb = Trb::new();
263 
264         // Fill the first segment.
265         trb.set_control(1);
266         assert_eq!(er.is_empty(), true);
267         assert_eq!(er.is_full().unwrap(), false);
268         assert!(er.add_event(trb).is_ok());
269         assert_eq!(er.is_full().unwrap(), false);
270         assert_eq!(er.is_empty(), false);
271         let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
272         assert_eq!(t.get_control(), 1);
273         assert_eq!(t.get_cycle(), true);
274 
275         trb.set_control(2);
276         assert!(er.add_event(trb).is_ok());
277         assert_eq!(er.is_full().unwrap(), false);
278         assert_eq!(er.is_empty(), false);
279         let t: Trb = gm
280             .read_obj_from_addr(GuestAddress(0x100 + trb_size))
281             .unwrap();
282         assert_eq!(t.get_control(), 2);
283         assert_eq!(t.get_cycle(), true);
284 
285         trb.set_control(3);
286         assert!(er.add_event(trb).is_ok());
287         assert_eq!(er.is_full().unwrap(), false);
288         assert_eq!(er.is_empty(), false);
289         let t: Trb = gm
290             .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
291             .unwrap();
292         assert_eq!(t.get_control(), 3);
293         assert_eq!(t.get_cycle(), true);
294 
295         // Fill the second segment.
296         trb.set_control(4);
297         assert!(er.add_event(trb).is_ok());
298         assert_eq!(er.is_full().unwrap(), false);
299         assert_eq!(er.is_empty(), false);
300         let t: Trb = gm.read_obj_from_addr(GuestAddress(0x200)).unwrap();
301         assert_eq!(t.get_control(), 4);
302         assert_eq!(t.get_cycle(), true);
303 
304         trb.set_control(5);
305         assert!(er.add_event(trb).is_ok());
306         assert_eq!(er.is_full().unwrap(), false);
307         assert_eq!(er.is_empty(), false);
308         let t: Trb = gm
309             .read_obj_from_addr(GuestAddress(0x200 + trb_size))
310             .unwrap();
311         assert_eq!(t.get_control(), 5);
312         assert_eq!(t.get_cycle(), true);
313 
314         trb.set_control(6);
315         assert!(er.add_event(trb).is_ok());
316         assert_eq!(er.is_full().unwrap(), false);
317         assert_eq!(er.is_empty(), false);
318         let t: Trb = gm
319             .read_obj_from_addr(GuestAddress(0x200 + 2 * trb_size))
320             .unwrap();
321         assert_eq!(t.get_control(), 6);
322         assert_eq!(t.get_cycle(), true);
323 
324         // Fill the third segment.
325         trb.set_control(7);
326         assert!(er.add_event(trb).is_ok());
327         assert_eq!(er.is_full().unwrap(), false);
328         assert_eq!(er.is_empty(), false);
329         let t: Trb = gm.read_obj_from_addr(GuestAddress(0x300)).unwrap();
330         assert_eq!(t.get_control(), 7);
331         assert_eq!(t.get_cycle(), true);
332 
333         trb.set_control(8);
334         assert!(er.add_event(trb).is_ok());
335         // Only one TRB slot remains, so the ring is considered full.
336         assert_eq!(er.is_full().unwrap(), true);
337         assert_eq!(er.is_empty(), false);
338         let t: Trb = gm
339             .read_obj_from_addr(GuestAddress(0x300 + trb_size))
340             .unwrap();
341         assert_eq!(t.get_control(), 8);
342         assert_eq!(t.get_cycle(), true);
343 
344         // Adding the last TRB will result in an error.
345         match er.add_event(trb) {
346             Err(Error::EventRingFull) => {}
347             _ => panic!("er should be full"),
348         };
349 
350         // Dequeue one trb.
351         er.set_dequeue_pointer(GuestAddress(0x100 + trb_size));
352         assert_eq!(er.is_full().unwrap(), false);
353         assert_eq!(er.is_empty(), false);
354 
355         // Fill the last TRB slot of the third segment.
356         trb.set_control(9);
357         assert!(er.add_event(trb).is_ok());
358         // Only one free TRB slot remains, so the ring is considered full.
359         assert_eq!(er.is_full().unwrap(), true);
360         assert_eq!(er.is_empty(), false);
361         let t: Trb = gm
362             .read_obj_from_addr(GuestAddress(0x300 + trb_size))
363             .unwrap();
364         assert_eq!(t.get_control(), 8);
365         assert_eq!(t.get_cycle(), true);
366 
367         // Adding the last TRB will result in an error.
368         match er.add_event(trb) {
369             Err(Error::EventRingFull) => {}
370             _ => panic!("er should be full"),
371         };
372 
373         // Dequeue until empty.
374         er.set_dequeue_pointer(GuestAddress(0x100));
375         assert_eq!(er.is_full().unwrap(), false);
376         assert_eq!(er.is_empty(), true);
377 
378         // Fill the first segment again.
379         trb.set_control(10);
380         assert!(er.add_event(trb).is_ok());
381         assert_eq!(er.is_full().unwrap(), false);
382         assert_eq!(er.is_empty(), false);
383         let t: Trb = gm.read_obj_from_addr(GuestAddress(0x100)).unwrap();
384         assert_eq!(t.get_control(), 10);
385         // The producer cycle state has toggled, so the cycle bit is reversed.
386         assert_eq!(t.get_cycle(), false);
387 
388         trb.set_control(11);
389         assert!(er.add_event(trb).is_ok());
390         assert_eq!(er.is_full().unwrap(), false);
391         assert_eq!(er.is_empty(), false);
392         let t: Trb = gm
393             .read_obj_from_addr(GuestAddress(0x100 + trb_size))
394             .unwrap();
395         assert_eq!(t.get_control(), 11);
396         assert_eq!(t.get_cycle(), false);
397 
398         trb.set_control(12);
399         assert!(er.add_event(trb).is_ok());
400         assert_eq!(er.is_full().unwrap(), false);
401         assert_eq!(er.is_empty(), false);
402         let t: Trb = gm
403             .read_obj_from_addr(GuestAddress(0x100 + 2 * trb_size))
404             .unwrap();
405         assert_eq!(t.get_control(), 12);
406         assert_eq!(t.get_cycle(), false);
407     }
408 }
409