1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // Implementation of an Intel ICH10 Input/Output Advanced Programmable Interrupt Controller
6 // See https://www.intel.com/content/dam/doc/datasheet/io-controller-hub-10-family-datasheet.pdf
7 // for a specification.
8
9 use anyhow::Context;
10 use base::error;
11 use base::warn;
12 use base::Error;
13 use base::Event;
14 use base::Result;
15 use base::Tube;
16 use base::TubeError;
17 use hypervisor::IoapicRedirectionTableEntry;
18 use hypervisor::IoapicState;
19 use hypervisor::MsiAddressMessage;
20 use hypervisor::MsiDataMessage;
21 use hypervisor::TriggerMode;
22 use hypervisor::NUM_IOAPIC_PINS;
23 use remain::sorted;
24 use serde::Deserialize;
25 use serde::Serialize;
26 use thiserror::Error;
27 use vm_control::VmIrqRequest;
28 use vm_control::VmIrqResponse;
29
30 use super::IrqEvent;
31 use crate::bus::BusAccessInfo;
32 use crate::pci::CrosvmDeviceId;
33 use crate::BusDevice;
34 use crate::DeviceId;
35 use crate::IrqEventSource;
36 use crate::Suspendable;
37
38 // ICH10 I/O APIC version: 0x20
39 const IOAPIC_VERSION_ID: u32 = 0x00000020;
40 pub const IOAPIC_BASE_ADDRESS: u64 = 0xfec00000;
41 // The Intel manual does not specify this size, but KVM uses it.
42 pub const IOAPIC_MEM_LENGTH_BYTES: u64 = 0x100;
43
44 // Constants for IOAPIC direct register offset.
45 const IOAPIC_REG_ID: u8 = 0x00;
46 const IOAPIC_REG_VERSION: u8 = 0x01;
47 const IOAPIC_REG_ARBITRATION_ID: u8 = 0x02;
48
49 // Register offsets
50 const IOREGSEL_OFF: u8 = 0x0;
51 const IOREGSEL_DUMMY_UPPER_32_BITS_OFF: u8 = 0x4;
52 const IOWIN_OFF: u8 = 0x10;
53 const IOEOIR_OFF: u8 = 0x40;
54
55 const IOWIN_SCALE: u8 = 0x2;
56
57 /// Given an IRQ and whether or not the selector should refer to the high bits, return a selector
58 /// suitable to use as an offset to read to/write from.
59 #[allow(dead_code)]
encode_selector_from_irq(irq: usize, is_high_bits: bool) -> u860 fn encode_selector_from_irq(irq: usize, is_high_bits: bool) -> u8 {
61 (irq as u8) * IOWIN_SCALE + IOWIN_OFF + (is_high_bits as u8)
62 }
63
64 /// Given an offset that was read from/written to, return a tuple of the relevant IRQ and whether
65 /// the offset refers to the high bits of that register.
decode_irq_from_selector(selector: u8) -> (usize, bool)66 fn decode_irq_from_selector(selector: u8) -> (usize, bool) {
67 (
68 ((selector - IOWIN_OFF) / IOWIN_SCALE) as usize,
69 selector & 1 != 0,
70 )
71 }
72
73 // The RTC needs special treatment to work properly for Windows (or other OSs that use tick
74 // stuffing). In order to avoid time drift, we need to guarantee that the correct number of RTC
75 // interrupts are injected into the guest. This hack essentialy treats RTC interrupts as level
76 // triggered, which allows the IOAPIC to be responsible for interrupt coalescing and allows the
77 // IOAPIC to pass back whether or not the interrupt was coalesced to the CMOS (which allows the
78 // CMOS to perform tick stuffing). This deviates from the IOAPIC spec in ways very similar to (but
79 // not exactly the same as) KVM's IOAPIC.
80 const RTC_IRQ: usize = 0x8;
81
82 /// This struct is essentially the complete serialized form of [IrqEvent] as used in
83 /// [Ioapic::out_events].
84 ///
85 /// [Ioapic] stores MSIs used to back GSIs, but not enough information to re-create these MSIs
86 /// (it is missing the address & data). It also includes data that is unused by the userspace
87 /// ioapic (the per gsi resample event, [IrqEvent::resample_event], is always None). This
88 /// struct incorporates the necessary information for snapshotting, and excludes that which
89 /// is not required.
#[derive(Clone, Serialize, Deserialize)]
struct OutEventSnapshot {
    // GSI this ioapic output line was routed to.
    gsi: u32,
    // MSI address programmed for the GSI's route (see [Ioapic::setup_msi]).
    msi_address: u64,
    // MSI data programmed for the GSI's route.
    msi_data: u32,
    // Debug/bookkeeping description of the interrupt source.
    source: IrqEventSource,
}
97
98 /// Snapshot of [Ioapic] state. Some fields were intentionally excluded:
99 /// * [Ioapic::resample_events]: these will get re-registered when the VM is created (e.g. prior to
100 /// restoring a snapshot).
101 /// * [Ioapic::out_events]: this isn't serializable as it contains Events. Replaced by
102 /// [IoapicSnapshot::out_event_snapshots].
103 /// * [Ioapic::irq_tube]: will be set up as part of creating the VM.
104 ///
105 /// See [Ioapic] for descriptions of fields by the same names.
#[derive(Serialize, Deserialize)]
struct IoapicSnapshot {
    // Number of redirection entries; used to re-size out_events on restore.
    num_pins: usize,
    ioregsel: u8,
    ioapicid: u32,
    rtc_remote_irr: bool,
    // One slot per pin; None if no MSI route was ever set up for that pin.
    out_event_snapshots: Vec<Option<OutEventSnapshot>>,
    redirect_table: Vec<IoapicRedirectionTableEntry>,
    interrupt_level: Vec<bool>,
}
116
/// Stores the outbound IRQ line in runtime & serializable forms.
struct OutEvent {
    /// The actual IrqEvent used to dispatch IRQs when the VM is running.
    irq_event: IrqEvent,
    /// Serializable form of this IRQ line so that it can be re-created when
    /// the VM is snapshotted & resumed. Will be None until the line is
    /// completely set up (i.e. until AddMsiRoute has succeeded in setup_msi).
    snapshot: Option<OutEventSnapshot>,
}
126
/// Userspace emulation of an ICH10-style I/O APIC. Incoming IRQ line activity is translated
/// into MSIs delivered via per-line outgoing events (see [Ioapic::setup_msi]).
pub struct Ioapic {
    /// Number of supported IO-APIC inputs / redirection entries.
    num_pins: usize,
    /// ioregsel register. Used for selecting which entry of the redirect table to read/write.
    ioregsel: u8,
    /// ioapicid register. Bits 24 - 27 contain the APIC ID for this device.
    ioapicid: u32,
    /// Remote IRR for Edge Triggered Real Time Clock interrupts, which allows the CMOS to know
    /// when one of its interrupts is being coalesced.
    rtc_remote_irr: bool,
    /// Outgoing irq events that are used to inject MSI interrupts.
    /// Also contains the serializable form used for snapshotting.
    out_events: Vec<Option<OutEvent>>,
    /// Events that should be triggered on an EOI. The outer Vec is indexed by GSI, and the inner
    /// Vec is an unordered list of registered resample events for the GSI.
    resample_events: Vec<Vec<Event>>,
    /// Redirection settings for each irq line.
    redirect_table: Vec<IoapicRedirectionTableEntry>,
    /// Interrupt activation state.
    interrupt_level: Vec<bool>,
    /// Tube used to route MSI irqs.
    irq_tube: Tube,
}
150
impl BusDevice for Ioapic {
    fn debug_label(&self) -> String {
        "userspace IOAPIC".to_string()
    }

    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::Ioapic.into()
    }

    /// MMIO read handler: dispatches on the register offset within the IOAPIC window.
    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        if data.len() > 8 || data.is_empty() {
            warn!("IOAPIC: Bad read size: {}", data.len());
            return;
        }
        // NOTE(review): an out-of-range offset is only warned about, not rejected; the
        // `info.offset as u8` truncation below could then alias a valid register. In practice
        // the bus only routes accesses within IOAPIC_MEM_LENGTH_BYTES — TODO confirm.
        if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
            warn!("IOAPIC: Bad read from {}", info);
        }
        let out = match info.offset as u8 {
            IOREGSEL_OFF => self.ioregsel.into(),
            IOREGSEL_DUMMY_UPPER_32_BITS_OFF => 0,
            IOWIN_OFF => self.ioapic_read(),
            IOEOIR_OFF => 0,
            _ => {
                warn!("IOAPIC: Bad read from {}", info);
                return;
            }
        };
        // Copy up to 4 bytes of the 32-bit result; reads larger than 4 bytes leave the
        // remaining bytes of `data` untouched.
        let out_arr = out.to_ne_bytes();
        for i in 0..4 {
            if i < data.len() {
                data[i] = out_arr[i];
            }
        }
    }

    /// MMIO write handler: dispatches on the register offset within the IOAPIC window.
    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        if data.len() > 8 || data.is_empty() {
            warn!("IOAPIC: Bad write size: {}", data.len());
            return;
        }
        // NOTE(review): same warn-without-return behavior as read() — see note there.
        if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
            warn!("IOAPIC: Bad write to {}", info);
        }
        match info.offset as u8 {
            // IOREGSEL latches only the low byte of the write.
            IOREGSEL_OFF => self.ioregsel = data[0],
            IOREGSEL_DUMMY_UPPER_32_BITS_OFF => {} // Ignored.
            IOWIN_OFF => {
                // IOWIN accepts only full 32-bit writes.
                if data.len() != 4 {
                    warn!("IOAPIC: Bad write size for iowin: {}", data.len());
                    return;
                }
                let data_arr = [data[0], data[1], data[2], data[3]];
                let val = u32::from_ne_bytes(data_arr);
                self.ioapic_write(val);
            }
            // EOI register: the written byte is the vector being acknowledged.
            IOEOIR_OFF => self.end_of_interrupt(data[0]),
            _ => {
                warn!("IOAPIC: Bad write to {}", info);
            }
        }
    }
}
213
214 impl Ioapic {
new(irq_tube: Tube, num_pins: usize) -> Result<Ioapic>215 pub fn new(irq_tube: Tube, num_pins: usize) -> Result<Ioapic> {
216 // TODO(dverkamp): clean this up once we are sure all callers use 24 pins.
217 assert_eq!(num_pins, NUM_IOAPIC_PINS);
218 let mut entry = IoapicRedirectionTableEntry::new();
219 entry.set_interrupt_mask(true);
220 Ok(Ioapic {
221 num_pins,
222 ioregsel: 0,
223 ioapicid: 0,
224 rtc_remote_irr: false,
225 out_events: (0..num_pins).map(|_| None).collect(),
226 resample_events: Vec::new(),
227 redirect_table: (0..num_pins).map(|_| entry).collect(),
228 interrupt_level: (0..num_pins).map(|_| false).collect(),
229 irq_tube,
230 })
231 }
232
get_ioapic_state(&self) -> IoapicState233 pub fn get_ioapic_state(&self) -> IoapicState {
234 // Convert vector of first NUM_IOAPIC_PINS active interrupts into an u32 value.
235 let level_bitmap = self
236 .interrupt_level
237 .iter()
238 .take(NUM_IOAPIC_PINS)
239 .rev()
240 .fold(0, |acc, &l| acc * 2 + l as u32);
241 let mut state = IoapicState {
242 base_address: IOAPIC_BASE_ADDRESS,
243 ioregsel: self.ioregsel,
244 ioapicid: self.ioapicid,
245 current_interrupt_level_bitmap: level_bitmap,
246 ..Default::default()
247 };
248 for (dst, src) in state
249 .redirect_table
250 .iter_mut()
251 .zip(self.redirect_table.iter())
252 {
253 *dst = *src;
254 }
255 state
256 }
257
set_ioapic_state(&mut self, state: &IoapicState)258 pub fn set_ioapic_state(&mut self, state: &IoapicState) {
259 self.ioregsel = state.ioregsel;
260 self.ioapicid = state.ioapicid & 0x0f00_0000;
261 for (src, dst) in state
262 .redirect_table
263 .iter()
264 .zip(self.redirect_table.iter_mut())
265 {
266 *dst = *src;
267 }
268 for (i, level) in self
269 .interrupt_level
270 .iter_mut()
271 .take(NUM_IOAPIC_PINS)
272 .enumerate()
273 {
274 *level = state.current_interrupt_level_bitmap & (1 << i) != 0;
275 }
276 }
277
    /// Replaces the per-GSI resample event lists. Indexed by GSI; signalled on EOI of a
    /// level-triggered interrupt (see [Ioapic::end_of_interrupt]).
    pub fn register_resample_events(&mut self, resample_events: Vec<Vec<Event>>) {
        self.resample_events = resample_events;
    }
281
    // The ioapic must be informed about EOIs in order to avoid sending multiple interrupts of the
    // same type at the same time.
    pub fn end_of_interrupt(&mut self, vector: u8) {
        // RTC interrupts are edge-triggered but tracked via rtc_remote_irr (see RTC_IRQ comment
        // above); clear it so the CMOS's next tick can be injected.
        if self.redirect_table[RTC_IRQ].get_vector() == vector && self.rtc_remote_irr {
            // Specifically clear RTC IRQ field
            self.rtc_remote_irr = false;
        }

        for i in 0..self.num_pins {
            if self.redirect_table[i].get_vector() == vector
                && self.redirect_table[i].get_trigger_mode() == TriggerMode::Level
            {
                // If resample listeners exist for this line, de-assert it first; the device is
                // expected to re-assert via the resample mechanism if still pending.
                if self
                    .resample_events
                    .get(i)
                    .map_or(false, |events| !events.is_empty())
                {
                    self.service_irq(i, false);
                }

                // Notify every registered resample listener for this GSI.
                if let Some(resample_events) = self.resample_events.get(i) {
                    for resample_evt in resample_events {
                        resample_evt.signal().unwrap();
                    }
                }
                // The entry is no longer "in service"; new asserts may inject again.
                self.redirect_table[i].set_remote_irr(false);
            }
            // There is an inherent race condition in hardware if the OS is finished processing an
            // interrupt and a new interrupt is delivered between issuing an EOI and the EOI being
            // completed. When that happens the ioapic is supposed to re-inject the interrupt.
            if self.interrupt_level[i] {
                self.service_irq(i, true);
            }
        }
    }
317
    /// Drives IRQ line `irq` to `level` (true = assert, false = de-assert).
    ///
    /// Returns true if an interrupt was injected (or a de-assert was recorded); false if the
    /// request was ignored, masked, or coalesced.
    ///
    /// Panics if `irq` is out of range (>= num_pins).
    pub fn service_irq(&mut self, irq: usize, level: bool) -> bool {
        let entry = &mut self.redirect_table[irq];

        // De-assert the interrupt.
        if !level {
            self.interrupt_level[irq] = false;
            return true;
        }

        // If it's an edge-triggered interrupt that's already high we ignore it.
        if entry.get_trigger_mode() == TriggerMode::Edge && self.interrupt_level[irq] {
            return false;
        }

        self.interrupt_level[irq] = true;

        // Interrupts are masked, so don't inject.
        if entry.get_interrupt_mask() {
            return false;
        }

        // Level-triggered and remote irr is already active, so we don't inject a new interrupt.
        // (Coalesce with the prior one(s)).
        if entry.get_trigger_mode() == TriggerMode::Level && entry.get_remote_irr() {
            return false;
        }

        // Coalesce RTC interrupt to make tick stuffing work.
        if irq == RTC_IRQ && self.rtc_remote_irr {
            return false;
        }

        // Inject by signalling the line's outgoing MSI event, if one has been set up.
        let injected = match self.out_events.get(irq) {
            Some(Some(out_event)) => out_event.irq_event.event.signal().is_ok(),
            _ => false,
        };

        // Record "in service" state so subsequent asserts coalesce until the guest EOIs.
        if entry.get_trigger_mode() == TriggerMode::Level && level && injected {
            entry.set_remote_irr(true);
        } else if irq == RTC_IRQ && injected {
            self.rtc_remote_irr = true;
        }

        injected
    }
363
    /// Handles a 32-bit write to IOWIN, dispatching on the register currently selected by
    /// `self.ioregsel`.
    fn ioapic_write(&mut self, val: u32) {
        match self.ioregsel {
            IOAPIC_REG_VERSION => { /* read-only register */ }
            // Only bits 27:24 of IOAPICID are writable.
            IOAPIC_REG_ID => self.ioapicid = val & 0x0f00_0000,
            IOAPIC_REG_ARBITRATION_ID => { /* read-only register */ }
            _ => {
                if self.ioregsel < IOWIN_OFF {
                    // Invalid write; ignore.
                    return;
                }
                let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
                if index >= self.num_pins {
                    // Invalid write; ignore.
                    return;
                }

                let entry = &mut self.redirect_table[index];
                if is_high_bits {
                    entry.set(32, 32, val.into());
                } else {
                    let before = *entry;
                    entry.set(0, 32, val.into());

                    // respect R/O bits.
                    entry.set_delivery_status(before.get_delivery_status());
                    entry.set_remote_irr(before.get_remote_irr());

                    // Clear remote_irr when switching to edge_triggered.
                    if entry.get_trigger_mode() == TriggerMode::Edge {
                        entry.set_remote_irr(false);
                    }

                    // NOTE: on pre-4.0 kernels, there's a race we would need to work around.
                    // "KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race"
                    // is the fix for this.
                }

                // If the line is still asserted and is now level-triggered and unmasked,
                // deliver the (possibly previously suppressed) interrupt immediately.
                if self.redirect_table[index].get_trigger_mode() == TriggerMode::Level
                    && self.interrupt_level[index]
                    && !self.redirect_table[index].get_interrupt_mask()
                {
                    self.service_irq(index, true);
                }

                // Rebuild the MSI address/data from the updated entry and reprogram the route.
                let mut address = MsiAddressMessage::new();
                let mut data = MsiDataMessage::new();
                let entry = &self.redirect_table[index];
                address.set_destination_mode(entry.get_dest_mode());
                address.set_destination_id(entry.get_dest_id());
                address.set_always_0xfee(0xfee);
                data.set_vector(entry.get_vector());
                data.set_delivery_mode(entry.get_delivery_mode());
                data.set_trigger(entry.get_trigger_mode());

                let msi_address = address.get(0, 32);
                let msi_data = data.get(0, 32);
                if let Err(e) = self.setup_msi(index, msi_address, msi_data as u32) {
                    error!("IOAPIC failed to set up MSI for index {}: {}", index, e);
                }
            }
        }
    }
426
    /// Lazily allocates a GSI + outgoing event for ioapic line `index` (on the first call with
    /// a valid route) and programs/updates the MSI route (address and data) for that GSI.
    ///
    /// On success, records the route in [OutEvent::snapshot] so it can be re-created on
    /// snapshot restore.
    fn setup_msi(
        &mut self,
        index: usize,
        msi_address: u64,
        msi_data: u32,
    ) -> std::result::Result<(), IoapicError> {
        if msi_data == 0 {
            // During boot, Linux first configures all ioapic pins with msi_data == 0; the routes
            // aren't yet assigned to devices and aren't usable. We skip MSI setup if msi_data is
            // 0.
            return Ok(());
        }

        // Allocate a GSI and event for the outgoing route, if we haven't already done it.
        // The event will be used on the "outgoing" end of the ioapic to send an interrupt to the
        // apics: when an incoming ioapic irq line gets signalled, the ioapic writes to the
        // corresponding outgoing event. The GSI number is used to update the routing info (MSI
        // data and addr) for the event. The GSI and event are allocated only once for each ioapic
        // irq line, when the guest first sets up the ioapic with a valid route. If the guest
        // later reconfigures an ioapic irq line, the same GSI and event are reused, and we change
        // the GSI's route to the new MSI data+addr destination.
        let name = self.debug_label();
        let gsi = if let Some(evt) = &self.out_events[index] {
            evt.irq_event.gsi
        } else {
            let event = Event::new().map_err(IoapicError::CreateEvent)?;
            let request = VmIrqRequest::AllocateOneMsi {
                irqfd: event,
                device_id: self.device_id().into(),
                queue_id: index, // Use out_events index as queue_id for outgoing ioapic MSIs
                device_name: name.clone(),
            };
            self.irq_tube
                .send(&request)
                .map_err(IoapicError::AllocateOneMsiSend)?;
            match self
                .irq_tube
                .recv()
                .map_err(IoapicError::AllocateOneMsiRecv)?
            {
                VmIrqResponse::AllocateOneMsi { gsi, .. } => {
                    self.out_events[index] = Some(OutEvent {
                        irq_event: IrqEvent {
                            gsi,
                            // Recover the irqfd Event that was moved into `request` above;
                            // ownership stayed local because `send` only serializes it.
                            event: match request {
                                VmIrqRequest::AllocateOneMsi { irqfd, .. } => irqfd,
                                _ => unreachable!(),
                            },
                            resample_event: None,
                            // This source isn't currently used for anything, we already sent the
                            // relevant source information to the main thread via the AllocateOneMsi
                            // request, but we populate it anyways for debugging.
                            source: IrqEventSource {
                                device_id: self.device_id(),
                                queue_id: index,
                                device_name: name,
                            },
                        },
                        snapshot: None,
                    });
                    gsi
                }
                VmIrqResponse::Err(e) => return Err(IoapicError::AllocateOneMsi(e)),
                _ => unreachable!(),
            }
        };

        // Set the MSI route for the GSI. This controls which apic(s) get the interrupt when the
        // ioapic's outgoing event is written, and various attributes of how the interrupt is
        // delivered.
        let request = VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AddMsiRouteSend)?;
        if let VmIrqResponse::Err(e) = self.irq_tube.recv().map_err(IoapicError::AddMsiRouteRecv)? {
            return Err(IoapicError::AddMsiRoute(e));
        }

        // Track this MSI route for snapshotting so it can be restored.
        self.out_events[index]
            .as_mut()
            .expect("IRQ is guaranteed initialized")
            .snapshot = Some(OutEventSnapshot {
            gsi,
            msi_address,
            msi_data,
            source: IrqEventSource {
                device_id: self.device_id(),
                queue_id: index,
                device_name: self.debug_label(),
            },
        });
        Ok(())
    }
525
    /// Similar to [Ioapic::setup_msi], but used only to re-create an MSI as
    /// part of the snapshot restore process, which allows us to assume certain
    /// invariants (like msi_data != 0) already hold. Unlike setup_msi, the GSI
    /// is supplied by the snapshot rather than allocated by the main process.
    fn restore_msi(
        &mut self,
        index: usize,
        gsi: u32,
        msi_address: u64,
        msi_data: u32,
    ) -> std::result::Result<(), IoapicError> {
        let event = Event::new().map_err(IoapicError::CreateEvent)?;
        let name = self.debug_label();
        let request = VmIrqRequest::AllocateOneMsiAtGsi {
            irqfd: event,
            gsi,
            device_id: self.device_id().into(),
            queue_id: index, // Use out_events index as queue_id for outgoing ioapic MSIs
            device_name: name.clone(),
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AllocateOneMsiSend)?;
        if let VmIrqResponse::Err(e) = self
            .irq_tube
            .recv()
            .map_err(IoapicError::AllocateOneMsiRecv)?
        {
            return Err(IoapicError::AllocateOneMsi(e));
        }

        self.out_events[index] = Some(OutEvent {
            irq_event: IrqEvent {
                gsi,
                // Recover the irqfd Event that was moved into `request` above.
                event: match request {
                    VmIrqRequest::AllocateOneMsiAtGsi { irqfd, .. } => irqfd,
                    _ => unreachable!(),
                },
                resample_event: None,
                // This source isn't currently used for anything, we already sent the
                // relevant source information to the main thread via the AllocateOneMsi
                // request, but we populate it anyways for debugging.
                source: IrqEventSource {
                    device_id: self.device_id(),
                    queue_id: index,
                    device_name: name,
                },
            },
            snapshot: None,
        });

        // Set the MSI route for the GSI. This controls which apic(s) get the interrupt when the
        // ioapic's outgoing event is written, and various attributes of how the interrupt is
        // delivered.
        let request = VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AddMsiRouteSend)?;
        if let VmIrqResponse::Err(e) = self.irq_tube.recv().map_err(IoapicError::AddMsiRouteRecv)? {
            return Err(IoapicError::AddMsiRoute(e));
        }

        // Track this MSI route for snapshotting so it can be restored.
        self.out_events[index]
            .as_mut()
            .expect("IRQ is guaranteed initialized")
            .snapshot = Some(OutEventSnapshot {
            gsi,
            msi_address,
            msi_data,
            source: IrqEventSource {
                device_id: self.device_id(),
                queue_id: index,
                device_name: self.debug_label(),
            },
        });
        Ok(())
    }
607
    /// On warm restore, there could already be MSIs registered. We need to
    /// release them in case the routing has changed (e.g. different
    /// data <-> GSI). Consumes all entries in [Ioapic::out_events].
    fn release_all_msis(&mut self) -> std::result::Result<(), IoapicError> {
        // drain(..) leaves out_events empty; each irqfd Event is moved into the request and
        // therefore closed on the far side's behalf.
        for out_event in self.out_events.drain(..).flatten() {
            let request = VmIrqRequest::ReleaseOneIrq {
                gsi: out_event.irq_event.gsi,
                irqfd: out_event.irq_event.event,
            };

            self.irq_tube
                .send(&request)
                .map_err(IoapicError::ReleaseOneIrqSend)?;
            if let VmIrqResponse::Err(e) = self
                .irq_tube
                .recv()
                .map_err(IoapicError::ReleaseOneIrqRecv)?
            {
                return Err(IoapicError::ReleaseOneIrq(e));
            }
        }
        Ok(())
    }
631
ioapic_read(&mut self) -> u32632 fn ioapic_read(&mut self) -> u32 {
633 match self.ioregsel {
634 IOAPIC_REG_VERSION => ((self.num_pins - 1) as u32) << 16 | IOAPIC_VERSION_ID,
635 IOAPIC_REG_ID | IOAPIC_REG_ARBITRATION_ID => self.ioapicid,
636 _ => {
637 if self.ioregsel < IOWIN_OFF {
638 // Invalid read; ignore and return 0.
639 0
640 } else {
641 let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
642 if index < self.num_pins {
643 let offset = if is_high_bits { 32 } else { 0 };
644 self.redirect_table[index].get(offset, 32) as u32
645 } else {
646 !0 // Invalid index - return all 1s
647 }
648 }
649 }
650 }
651 }
652 }
653
654 impl Suspendable for Ioapic {
snapshot(&mut self) -> anyhow::Result<serde_json::Value>655 fn snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
656 serde_json::to_value(IoapicSnapshot {
657 num_pins: self.num_pins,
658 ioregsel: self.ioregsel,
659 ioapicid: self.ioapicid,
660 rtc_remote_irr: self.rtc_remote_irr,
661 out_event_snapshots: self
662 .out_events
663 .iter()
664 .map(|out_event_opt| {
665 if let Some(out_event) = out_event_opt {
666 out_event.snapshot.clone()
667 } else {
668 None
669 }
670 })
671 .collect(),
672 redirect_table: self.redirect_table.clone(),
673 interrupt_level: self.interrupt_level.clone(),
674 })
675 .context("failed serializing Ioapic")
676 }
677
    /// Restores ioapic state from a value produced by [Ioapic::snapshot], re-creating the
    /// outgoing MSI routes recorded in the snapshot.
    fn restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
        let snap: IoapicSnapshot =
            serde_json::from_value(data).context("failed to deserialize Ioapic snapshot")?;

        self.num_pins = snap.num_pins;
        self.ioregsel = snap.ioregsel;
        self.ioapicid = snap.ioapicid;
        self.rtc_remote_irr = snap.rtc_remote_irr;
        self.redirect_table = snap.redirect_table;
        self.interrupt_level = snap.interrupt_level;
        // Release any MSIs from the pre-restore state (warm restore) *before* re-creating the
        // snapshotted routes; the GSI <-> data mapping may have changed.
        self.release_all_msis()
            .context("failed to clear MSIs prior to restore")?;
        self.out_events = (0..snap.num_pins).map(|_| None).collect();

        for (index, maybe_out_event) in snap.out_event_snapshots.iter().enumerate() {
            if let Some(out_event) = maybe_out_event {
                self.restore_msi(
                    index,
                    out_event.gsi,
                    out_event.msi_address,
                    out_event.msi_data,
                )?;
            }
        }
        Ok(())
    }
704
    // No-op: the ioapic has no background activity to quiesce.
    fn sleep(&mut self) -> anyhow::Result<()> {
        Ok(())
    }
708
    // No-op: nothing was stopped in sleep().
    fn wake(&mut self) -> anyhow::Result<()> {
        Ok(())
    }
712 }
713
/// Errors from the ioapic's MSI allocation/routing IPC with the main process.
/// Variants are kept alphabetized (enforced by #[sorted]).
#[sorted]
#[derive(Error, Debug)]
enum IoapicError {
    #[error("AddMsiRoute failed: {0}")]
    AddMsiRoute(Error),
    #[error("failed to receive AddMsiRoute response: {0}")]
    AddMsiRouteRecv(TubeError),
    #[error("failed to send AddMsiRoute request: {0}")]
    AddMsiRouteSend(TubeError),
    #[error("AllocateOneMsi failed: {0}")]
    AllocateOneMsi(Error),
    #[error("failed to receive AllocateOneMsi response: {0}")]
    AllocateOneMsiRecv(TubeError),
    #[error("failed to send AllocateOneMsi request: {0}")]
    AllocateOneMsiSend(TubeError),
    #[error("failed to create event object: {0}")]
    CreateEvent(Error),
    #[error("ReleaseOneIrq failed: {0}")]
    ReleaseOneIrq(Error),
    #[error("failed to receive ReleaseOneIrq response: {0}")]
    ReleaseOneIrqRecv(TubeError),
    #[error("failed to send ReleaseOneIrq request: {0}")]
    ReleaseOneIrqSend(TubeError),
}
738
739 #[cfg(test)]
740 mod tests {
741 use std::thread;
742
743 use hypervisor::DeliveryMode;
744 use hypervisor::DeliveryStatus;
745 use hypervisor::DestinationMode;
746
747 use super::*;
748
749 const DEFAULT_VECTOR: u8 = 0x3a;
750 const DEFAULT_DESTINATION_ID: u8 = 0x5f;
751
    // Creates an ioapic with a dangling irq tube; tests here never complete MSI setup,
    // so the dropped sender half is never used.
    fn new() -> Ioapic {
        let (_, irq_tube) = Tube::pair().unwrap();
        Ioapic::new(irq_tube, NUM_IOAPIC_PINS).unwrap()
    }
756
ioapic_bus_address(offset: u8) -> BusAccessInfo757 fn ioapic_bus_address(offset: u8) -> BusAccessInfo {
758 let offset = offset as u64;
759 BusAccessInfo {
760 offset,
761 address: IOAPIC_BASE_ADDRESS + offset,
762 id: 0,
763 }
764 }
765
    // Configures the last pin with the given trigger mode and returns (ioapic, that pin).
    fn set_up(trigger: TriggerMode) -> (Ioapic, usize) {
        let irq = NUM_IOAPIC_PINS - 1;
        let ioapic = set_up_with_irq(irq, trigger);
        (ioapic, irq)
    }
771
    // Configures pin `irq` with the given trigger mode and attaches a fake outgoing event so
    // service_irq has something to signal. The snapshot fields (gsi/msi values) are arbitrary.
    fn set_up_with_irq(irq: usize, trigger: TriggerMode) -> Ioapic {
        let mut ioapic = self::new();
        set_up_redirection_table_entry(&mut ioapic, irq, trigger);
        ioapic.out_events[irq] = Some(OutEvent {
            irq_event: IrqEvent {
                gsi: NUM_IOAPIC_PINS as u32,
                event: Event::new().unwrap(),
                resample_event: None,
                source: IrqEventSource {
                    device_id: ioapic.device_id(),
                    queue_id: irq,
                    device_name: ioapic.debug_label(),
                },
            },

            snapshot: Some(OutEventSnapshot {
                gsi: NUM_IOAPIC_PINS as u32,
                msi_address: 0xa,
                msi_data: 0xd,
                source: IrqEventSource {
                    device_id: ioapic.device_id(),
                    queue_id: irq,
                    device_name: ioapic.debug_label(),
                },
            }),
        });
        ioapic
    }
800
read_reg(ioapic: &mut Ioapic, selector: u8) -> u32801 fn read_reg(ioapic: &mut Ioapic, selector: u8) -> u32 {
802 let mut data = [0; 4];
803 ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
804 ioapic.read(ioapic_bus_address(IOWIN_OFF), &mut data);
805 u32::from_ne_bytes(data)
806 }
807
write_reg(ioapic: &mut Ioapic, selector: u8, value: u32)808 fn write_reg(ioapic: &mut Ioapic, selector: u8, value: u32) {
809 ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
810 ioapic.write(ioapic_bus_address(IOWIN_OFF), &value.to_ne_bytes());
811 }
812
read_entry(ioapic: &mut Ioapic, irq: usize) -> IoapicRedirectionTableEntry813 fn read_entry(ioapic: &mut Ioapic, irq: usize) -> IoapicRedirectionTableEntry {
814 let mut entry = IoapicRedirectionTableEntry::new();
815 entry.set(
816 0,
817 32,
818 read_reg(ioapic, encode_selector_from_irq(irq, false)).into(),
819 );
820 entry.set(
821 32,
822 32,
823 read_reg(ioapic, encode_selector_from_irq(irq, true)).into(),
824 );
825 entry
826 }
827
write_entry(ioapic: &mut Ioapic, irq: usize, entry: IoapicRedirectionTableEntry)828 fn write_entry(ioapic: &mut Ioapic, irq: usize, entry: IoapicRedirectionTableEntry) {
829 write_reg(
830 ioapic,
831 encode_selector_from_irq(irq, false),
832 entry.get(0, 32) as u32,
833 );
834 write_reg(
835 ioapic,
836 encode_selector_from_irq(irq, true),
837 entry.get(32, 32) as u32,
838 );
839 }
840
    // Programs pin `irq` with a test entry using the given trigger mode.
    // NOTE(review): vector and dest_id appear swapped relative to the constant names
    // (vector <- DEFAULT_DESTINATION_ID, dest_id <- DEFAULT_VECTOR). The EOI tests depend on
    // this (they pass DEFAULT_DESTINATION_ID as the vector), so any rename/fix must update
    // both together — TODO confirm intent.
    fn set_up_redirection_table_entry(ioapic: &mut Ioapic, irq: usize, trigger_mode: TriggerMode) {
        let mut entry = IoapicRedirectionTableEntry::new();
        entry.set_vector(DEFAULT_DESTINATION_ID);
        entry.set_delivery_mode(DeliveryMode::Startup);
        entry.set_delivery_status(DeliveryStatus::Pending);
        entry.set_dest_id(DEFAULT_VECTOR);
        entry.set_trigger_mode(trigger_mode);
        write_entry(ioapic, irq, entry);
    }
850
    // Sets or clears the interrupt mask bit of pin `irq` via a read-modify-write of its entry.
    fn set_mask(ioapic: &mut Ioapic, irq: usize, mask: bool) {
        let mut entry = read_entry(ioapic, irq);
        entry.set_interrupt_mask(mask);
        write_entry(ioapic, irq, entry);
    }
856
    // Each single-byte write to IOREGSEL should read back identically.
    #[test]
    fn write_read_ioregsel() {
        let mut ioapic = self::new();
        let data_write = [0x0f, 0xf0, 0x01, 0xff];
        let mut data_read = [0; 4];

        for i in 0..data_write.len() {
            ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &data_write[i..i + 1]);
            ioapic.read(ioapic_bus_address(IOREGSEL_OFF), &mut data_read[i..i + 1]);
            assert_eq!(data_write[i], data_read[i]);
        }
    }
869
    // Verify that version register is actually read-only.
    #[test]
    fn write_read_ioaic_reg_version() {
        let mut ioapic = self::new();
        let before = read_reg(&mut ioapic, IOAPIC_REG_VERSION);
        // Write the bitwise complement to guarantee the value differs from `before`.
        let data_write = !before;

        write_reg(&mut ioapic, IOAPIC_REG_VERSION, data_write);
        assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_VERSION), before);
    }
880
    // Verify that only bits 27:24 of the IOAPICID are readable/writable.
    #[test]
    fn write_read_ioapic_reg_id() {
        let mut ioapic = self::new();

        // 0x1f3e5d7c masked to bits 27:24 is 0x0f000000.
        write_reg(&mut ioapic, IOAPIC_REG_ID, 0x1f3e5d7c);
        assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_ID), 0x0f000000);
    }
889
    // Write to read-only register IOAPICARB.
    #[test]
    fn write_read_ioapic_arbitration_id() {
        let mut ioapic = self::new();

        let data_write_id = 0x1f3e5d7c;
        let expected_result = 0x0f000000;

        // Write to IOAPICID. This should also change IOAPICARB.
        write_reg(&mut ioapic, IOAPIC_REG_ID, data_write_id);

        // Read IOAPICARB
        assert_eq!(
            read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
            expected_result
        );

        // Try to write to IOAPICARB and verify unchanged result.
        write_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID, !data_write_id);
        assert_eq!(
            read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
            expected_result
        );
    }
914
    // service_irq indexes redirect_table directly, so an out-of-range IRQ panics.
    #[test]
    #[should_panic(expected = "index out of bounds: the len is 24 but the index is 24")]
    fn service_invalid_irq() {
        let mut ioapic = self::new();
        ioapic.service_irq(NUM_IOAPIC_PINS, false);
    }
921
    // Test a level triggered IRQ interrupt: a single assert/de-assert cycle.
    #[test]
    fn service_level_irq() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // TODO(mutexlox): Check that interrupt is fired once.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }
931
    // An EOI between asserts should allow a second level-triggered injection.
    #[test]
    fn service_multiple_level_irqs() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);
        // TODO(mutexlox): Check that interrupt is fired twice.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
        ioapic.service_irq(irq, true);
    }
941
    // Test multiple level interrupts without an EOI and verify that only one interrupt is
    // delivered (remote_irr stays set until EOI, so the second assert coalesces).
    #[test]
    fn coalesce_multiple_level_irqs() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // TODO(mutexlox): Test that only one interrupt is delivered.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.service_irq(irq, true);
    }
953
954 // Test multiple RTC interrupts without an EOI and verify that only one interrupt is delivered.
955 #[test]
coalesce_multiple_rtc_irqs()956 fn coalesce_multiple_rtc_irqs() {
957 let irq = RTC_IRQ;
958 let mut ioapic = set_up_with_irq(irq, TriggerMode::Edge);
959
960 // TODO(mutexlox): Verify that only one IRQ is delivered.
961 ioapic.service_irq(irq, true);
962 ioapic.service_irq(irq, false);
963 ioapic.service_irq(irq, true);
964 }
965
966 // Test that a level interrupt that has been coalesced is re-raised if a guest issues an
967 // EndOfInterrupt after the interrupt was coalesced while the line is still asserted.
968 #[test]
reinject_level_interrupt()969 fn reinject_level_interrupt() {
970 let (mut ioapic, irq) = set_up(TriggerMode::Level);
971
972 // TODO(mutexlox): Verify that only one IRQ is delivered.
973 ioapic.service_irq(irq, true);
974 ioapic.service_irq(irq, false);
975 ioapic.service_irq(irq, true);
976
977 // TODO(mutexlox): Verify that this last interrupt occurs as a result of the EOI, rather
978 // than in response to the last service_irq.
979 ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
980 }
981
982 #[test]
service_edge_triggered_irq()983 fn service_edge_triggered_irq() {
984 let (mut ioapic, irq) = set_up(TriggerMode::Edge);
985
986 // TODO(mutexlox): Verify that one interrupt is delivered.
987 ioapic.service_irq(irq, true);
988 ioapic.service_irq(irq, true); // Repeated asserts before a deassert should be ignored.
989 ioapic.service_irq(irq, false);
990 }
991
992 // Verify that the state of an edge-triggered interrupt is properly tracked even when the
993 // interrupt is disabled.
994 #[test]
edge_trigger_unmask_test()995 fn edge_trigger_unmask_test() {
996 let (mut ioapic, irq) = set_up(TriggerMode::Edge);
997
998 // TODO(mutexlox): Expect an IRQ.
999
1000 ioapic.service_irq(irq, true);
1001
1002 set_mask(&mut ioapic, irq, true);
1003 ioapic.service_irq(irq, false);
1004
1005 // No interrupt triggered while masked.
1006 ioapic.service_irq(irq, true);
1007 ioapic.service_irq(irq, false);
1008
1009 set_mask(&mut ioapic, irq, false);
1010
1011 // TODO(mutexlox): Expect another interrupt.
1012 // Interrupt triggered while unmasked, even though when it was masked the level was high.
1013 ioapic.service_irq(irq, true);
1014 ioapic.service_irq(irq, false);
1015 }
1016
1017 // Verify that a level-triggered interrupt that is triggered while masked will fire once the
1018 // interrupt is unmasked.
1019 #[test]
level_trigger_unmask_test()1020 fn level_trigger_unmask_test() {
1021 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1022
1023 set_mask(&mut ioapic, irq, true);
1024 ioapic.service_irq(irq, true);
1025
1026 // TODO(mutexlox): expect an interrupt after this.
1027 set_mask(&mut ioapic, irq, false);
1028 }
1029
1030 // Verify that multiple asserts before a deassert are ignored even if there's an EOI between
1031 // them.
1032 #[test]
end_of_interrupt_edge_triggered_irq()1033 fn end_of_interrupt_edge_triggered_irq() {
1034 let (mut ioapic, irq) = set_up(TriggerMode::Edge);
1035
1036 // TODO(mutexlox): Expect 1 interrupt.
1037 ioapic.service_irq(irq, true);
1038 ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
1039 // Repeated asserts before a de-assert should be ignored.
1040 ioapic.service_irq(irq, true);
1041 ioapic.service_irq(irq, false);
1042 }
1043
1044 // Send multiple edge-triggered interrupts in a row.
1045 #[test]
service_multiple_edge_irqs()1046 fn service_multiple_edge_irqs() {
1047 let (mut ioapic, irq) = set_up(TriggerMode::Edge);
1048
1049 ioapic.service_irq(irq, true);
1050 // TODO(mutexlox): Verify that an interrupt occurs here.
1051 ioapic.service_irq(irq, false);
1052
1053 ioapic.service_irq(irq, true);
1054 // TODO(mutexlox): Verify that an interrupt occurs here.
1055 ioapic.service_irq(irq, false);
1056 }
1057
1058 // Test an interrupt line with negative polarity.
1059 #[test]
service_negative_polarity_irq()1060 fn service_negative_polarity_irq() {
1061 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1062
1063 let mut entry = read_entry(&mut ioapic, irq);
1064 entry.set_polarity(1);
1065 write_entry(&mut ioapic, irq, entry);
1066
1067 // TODO(mutexlox): Expect an interrupt to fire.
1068 ioapic.service_irq(irq, false);
1069 }
1070
1071 // Ensure that remote IRR can't be edited via mmio.
1072 #[test]
remote_irr_read_only()1073 fn remote_irr_read_only() {
1074 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1075
1076 ioapic.redirect_table[irq].set_remote_irr(true);
1077
1078 let mut entry = read_entry(&mut ioapic, irq);
1079 entry.set_remote_irr(false);
1080 write_entry(&mut ioapic, irq, entry);
1081
1082 assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), true);
1083 }
1084
1085 #[test]
delivery_status_read_only()1086 fn delivery_status_read_only() {
1087 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1088
1089 ioapic.redirect_table[irq].set_delivery_status(DeliveryStatus::Pending);
1090
1091 let mut entry = read_entry(&mut ioapic, irq);
1092 entry.set_delivery_status(DeliveryStatus::Idle);
1093 write_entry(&mut ioapic, irq, entry);
1094
1095 assert_eq!(
1096 read_entry(&mut ioapic, irq).get_delivery_status(),
1097 DeliveryStatus::Pending
1098 );
1099 }
1100
1101 #[test]
level_to_edge_transition_clears_remote_irr()1102 fn level_to_edge_transition_clears_remote_irr() {
1103 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1104
1105 ioapic.redirect_table[irq].set_remote_irr(true);
1106
1107 let mut entry = read_entry(&mut ioapic, irq);
1108 entry.set_trigger_mode(TriggerMode::Edge);
1109 write_entry(&mut ioapic, irq, entry);
1110
1111 assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), false);
1112 }
1113
1114 #[test]
masking_preserves_remote_irr()1115 fn masking_preserves_remote_irr() {
1116 let (mut ioapic, irq) = set_up(TriggerMode::Level);
1117
1118 ioapic.redirect_table[irq].set_remote_irr(true);
1119
1120 set_mask(&mut ioapic, irq, true);
1121 set_mask(&mut ioapic, irq, false);
1122
1123 assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), true);
1124 }
1125
    // Test reconfiguration racing with EOIs.
    //
    // Simulates a guest that reads a redirection table entry, then services
    // and EOIs an interrupt, and only afterwards writes the (now stale)
    // modified entry back. The statement interleaving below IS the scenario
    // under test; do not reorder.
    #[test]
    fn reconfiguration_race() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // Fire one level-triggered interrupt.
        // TODO(mutexlox): Check that it fires.
        ioapic.service_irq(irq, true);

        // Read the redirection table entry before the EOI...
        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_trigger_mode(TriggerMode::Edge);

        // The EOI lands while the guest still holds the stale entry.
        ioapic.service_irq(irq, false);
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);

        // ... and write back that (modified) value.
        write_entry(&mut ioapic, irq, entry);

        // Fire one *edge* triggered interrupt
        // TODO(mutexlox): Assert that the interrupt fires once.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }
1150
    // Ensure that swapping to edge triggered and back clears the remote irr
    // bit: this acts as an "implicit EOI", allowing a later level-triggered
    // interrupt on the same pin to be delivered again.
    #[test]
    fn implicit_eoi() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // Fire one level-triggered interrupt.
        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that one interrupt was fired.
        ioapic.service_irq(irq, false);

        // Do an implicit EOI by cycling between edge and level triggered.
        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_trigger_mode(TriggerMode::Edge);
        write_entry(&mut ioapic, irq, entry);
        entry.set_trigger_mode(TriggerMode::Level);
        write_entry(&mut ioapic, irq, entry);

        // Fire one level-triggered interrupt.
        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that one interrupt fires.
        ioapic.service_irq(irq, false);
    }
1173
    // Populate a redirection table entry from a raw 64-bit value and verify
    // that every field accessor decodes the expected bits (layout diagrammed
    // below), then confirm the entry round-trips through an MMIO write/read.
    #[test]
    fn set_redirection_entry_by_bits() {
        let mut entry = IoapicRedirectionTableEntry::new();
        // destination_mode
        //      polarity |
        // trigger_mode  |  |
        //            |  |  |
        // 0011 1010 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 1001 0110 0101 1111
        // |_______| |______________________________________________|| | |  |_| |_______|
        //  dest_id                      reserved                    | | |   |    vector
        //                                           interrupt_mask | | |   |
        //                                                remote_irr | |   |
        //                                           delivery_status |   |
        //                                                       delivery_mode
        entry.set(0, 64, 0x3a0000000000965f);
        assert_eq!(entry.get_vector(), 0x5f);
        assert_eq!(entry.get_delivery_mode(), DeliveryMode::Startup);
        assert_eq!(entry.get_dest_mode(), DestinationMode::Physical);
        assert_eq!(entry.get_delivery_status(), DeliveryStatus::Pending);
        assert_eq!(entry.get_polarity(), 0);
        assert_eq!(entry.get_remote_irr(), false);
        assert_eq!(entry.get_trigger_mode(), TriggerMode::Level);
        assert_eq!(entry.get_interrupt_mask(), false);
        assert_eq!(entry.get_reserved(), 0);
        assert_eq!(entry.get_dest_id(), 0x3a);

        // Writing the entry over MMIO should preserve the decoded fields.
        let (mut ioapic, irq) = set_up(TriggerMode::Edge);
        write_entry(&mut ioapic, irq, entry);
        assert_eq!(
            read_entry(&mut ioapic, irq).get_trigger_mode(),
            TriggerMode::Level
        );

        // TODO(mutexlox): Verify that this actually fires an interrupt.
        ioapic.service_irq(irq, true);
    }
1210
1211 #[track_caller]
recv_allocate_msi(t: &Tube) -> u321212 fn recv_allocate_msi(t: &Tube) -> u32 {
1213 match t.recv::<VmIrqRequest>().unwrap() {
1214 VmIrqRequest::AllocateOneMsiAtGsi { gsi, .. } => gsi,
1215 msg => panic!("unexpected irqchip message: {:?}", msg),
1216 }
1217 }
1218
    /// Fields of a `VmIrqRequest::AddMsiRoute` message, captured so tests can
    /// assert on them after the request has been consumed.
    struct MsiRouteDetails {
        // GSI being routed.
        gsi: u32,
        // MSI address from the request.
        msi_address: u64,
        // MSI data payload from the request.
        msi_data: u32,
    }
1224
1225 #[track_caller]
recv_add_msi_route(t: &Tube) -> MsiRouteDetails1226 fn recv_add_msi_route(t: &Tube) -> MsiRouteDetails {
1227 match t.recv::<VmIrqRequest>().unwrap() {
1228 VmIrqRequest::AddMsiRoute {
1229 gsi,
1230 msi_address,
1231 msi_data,
1232 } => MsiRouteDetails {
1233 gsi,
1234 msi_address,
1235 msi_data,
1236 },
1237 msg => panic!("unexpected irqchip message: {:?}", msg),
1238 }
1239 }
1240
1241 #[track_caller]
recv_release_one_irq(t: &Tube) -> u321242 fn recv_release_one_irq(t: &Tube) -> u32 {
1243 match t.recv::<VmIrqRequest>().unwrap() {
1244 VmIrqRequest::ReleaseOneIrq { gsi, irqfd: _ } => gsi,
1245 msg => panic!("unexpected irqchip message: {:?}", msg),
1246 }
1247 }
1248
    /// Sends a successful (`Ok`) irqchip response back over `t`.
    #[track_caller]
    fn send_ok(t: &Tube) {
        t.send(&VmIrqResponse::Ok).unwrap();
    }
1253
    /// Simulates restoring the ioapic as if the VM had never booted a guest.
    /// This is called the "cold" restore case since all the devices are
    /// expected to be essentially blank / unconfigured.
    ///
    /// The fake irqchip thread below asserts the exact request sequence the
    /// restore is expected to emit (allocate MSI, then add the route), so the
    /// ordering of recv/send calls must match the restore implementation.
    #[test]
    fn verify_ioapic_restore_cold_smoke() {
        let (irqchip_tube, ioapic_irq_tube) = Tube::pair().unwrap();
        let gsi_num = NUM_IOAPIC_PINS as u32;

        // Creates an ioapic w/ an MSI for GSI = NUM_IOAPIC_PINS, MSI
        // address 0xa, and data 0xd. The irq index (pin number) is 10, but
        // this is not meaningful.
        let mut saved_ioapic = set_up_with_irq(10, TriggerMode::Level);

        // Take a snapshot of the ioapic.
        let snapshot = saved_ioapic.snapshot().unwrap();

        // Create a fake irqchip to respond to our requests.
        let irqchip_fake = thread::spawn(move || {
            // Restore of a fresh ioapic should allocate the MSI...
            assert_eq!(recv_allocate_msi(&irqchip_tube), gsi_num);
            send_ok(&irqchip_tube);
            // ...and then register the snapshotted route for it.
            let route = recv_add_msi_route(&irqchip_tube);
            assert_eq!(route.gsi, gsi_num);
            assert_eq!(route.msi_address, 0xa);
            assert_eq!(route.msi_data, 0xd);
            send_ok(&irqchip_tube);
            // Return the tube so it stays open until the restore completes.
            irqchip_tube
        });

        let mut restored_ioapic = Ioapic::new(ioapic_irq_tube, NUM_IOAPIC_PINS).unwrap();
        restored_ioapic.restore(snapshot).unwrap();

        irqchip_fake.join().unwrap();
    }
1287
    /// In the warm case, we are restoring to an Ioapic that already exists and
    /// may have MSIs already allocated. Here, we're verifying the restore
    /// process releases any existing MSIs before registering the restored MSIs.
    ///
    /// As in the cold test, the fake irqchip thread asserts the exact request
    /// sequence (release, allocate, add route), so the ordering of recv/send
    /// calls must match the restore implementation.
    #[test]
    fn verify_ioapic_restore_warm_smoke() {
        let (irqchip_tube, ioapic_irq_tube) = Tube::pair().unwrap();
        let gsi_num = NUM_IOAPIC_PINS as u32;

        // Creates an ioapic w/ an MSI for GSI = NUM_IOAPIC_PINS, MSI
        // address 0xa, and data 0xd. The irq index (pin number) is 10, but
        // this is not meaningful.
        let mut ioapic = set_up_with_irq(10, TriggerMode::Level);

        // We don't connect this Tube until after the IRQ is initially set up
        // as it triggers messages we don't want to assert on (they're about
        // ioapic functionality, not snapshotting).
        ioapic.irq_tube = ioapic_irq_tube;

        // Take a snapshot of the ioapic.
        let snapshot = ioapic.snapshot().unwrap();

        // Create a fake irqchip to respond to our requests.
        let irqchip_fake = thread::spawn(move || {
            // We should clear the existing MSI as the first restore step.
            assert_eq!(recv_release_one_irq(&irqchip_tube), gsi_num);
            send_ok(&irqchip_tube);

            // Then re-allocate it as part of restoring.
            assert_eq!(recv_allocate_msi(&irqchip_tube), gsi_num);
            send_ok(&irqchip_tube);
            let route = recv_add_msi_route(&irqchip_tube);
            assert_eq!(route.gsi, gsi_num);
            assert_eq!(route.msi_address, 0xa);
            assert_eq!(route.msi_data, 0xd);
            send_ok(&irqchip_tube);
            // Return the tube so it stays open until the restore completes.
            irqchip_tube
        });

        ioapic.restore(snapshot).unwrap();

        irqchip_fake.join().unwrap();
    }
1330 }
1331