// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::sync::Arc;

use base::errno_result;
use base::ioctl_with_ref;
use base::AsRawDescriptor;
use base::Error as BaseError;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use hypervisor::kvm::KvmVcpu;
use hypervisor::kvm::KvmVm;
use hypervisor::DeviceKind;
use hypervisor::IrqRoute;
use hypervisor::Vm;
use kvm_sys::*;
use sync::Mutex;

use crate::IrqChip;
use crate::IrqChipRiscv64;

const RISCV_IRQCHIP: u64 = 0x0800_0000;

const KVM_DEV_RISCV_AIA_ADDR_APLIC: u64 = 0;

pub const AIA_IMSIC_BASE: u64 = RISCV_IRQCHIP;
const KVM_DEV_RISCV_IMSIC_SIZE: u64 = 0x1000;

pub const fn aia_addr_imsic(vcpu_id: u64) -> u64 {
    1 + vcpu_id
}

pub const fn aia_imsic_addr(hart: usize) -> u64 {
    AIA_IMSIC_BASE + (hart as u64) * KVM_DEV_RISCV_IMSIC_SIZE
}

pub const fn aia_imsic_size(num_harts: usize) -> u64 {
    num_harts as u64 * KVM_DEV_RISCV_IMSIC_SIZE
}

pub const fn aia_aplic_addr(num_harts: usize) -> u64 {
    AIA_IMSIC_BASE + (num_harts as u64) * KVM_DEV_RISCV_IMSIC_SIZE
}
pub const AIA_APLIC_SIZE: u32 = 0x4000;
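
// Illustrative note: with the helpers above, the guest-physical AIA layout for a hypothetical
// N-hart guest is one 0x1000-byte IMSIC page per hart starting at AIA_IMSIC_BASE, with the
// APLIC placed immediately after the last IMSIC page:
//
//   aia_imsic_addr(0) = AIA_IMSIC_BASE + 0x0000      IMSIC page, hart 0
//   aia_imsic_addr(1) = AIA_IMSIC_BASE + 0x1000      IMSIC page, hart 1
//   ...
//   aia_aplic_addr(N) = AIA_IMSIC_BASE + N * 0x1000  APLIC (AIA_APLIC_SIZE bytes)
//
// For the KVM address attributes, index 0 selects the APLIC (KVM_DEV_RISCV_AIA_ADDR_APLIC)
// and vcpu N's IMSIC uses index N + 1 (see aia_addr_imsic).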

// Constants for get/set attribute calls.
const KVM_DEV_RISCV_AIA_GRP_CONFIG: u32 = 0;

const KVM_DEV_RISCV_AIA_CONFIG_MODE: u64 = 0;
const KVM_DEV_RISCV_AIA_CONFIG_IDS: u64 = 1;
const KVM_DEV_RISCV_AIA_CONFIG_SRCS: u64 = 2;
const KVM_DEV_RISCV_AIA_CONFIG_HART_BITS: u64 = 5;

pub const IMSIC_MAX_INT_IDS: u64 = 2047;

// CONFIG_MODE values
const AIA_MODE_HWACCEL: u32 = 1;
const AIA_MODE_AUTO: u32 = 2;

const KVM_DEV_RISCV_AIA_GRP_ADDR: u32 = 1;

const KVM_DEV_RISCV_AIA_GRP_CTRL: u32 = 2;

struct AiaDescriptor(SafeDescriptor);

impl AiaDescriptor {
    fn try_clone(&self) -> Result<AiaDescriptor> {
        self.0.try_clone().map(AiaDescriptor)
    }

    fn aia_init(&self) -> Result<()> {
        let init_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_CTRL,
            attr: KVM_DEV_RISCV_AIA_CTRL_INIT as u64,
            addr: 0,
            flags: 0,
        };

        // Safe because we allocated the struct that's being passed in, and no pointer is passed
        // via `addr` for this attribute.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &init_attr) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    fn get_num_ids(&self) -> Result<u32> {
        let mut aia_num_ids = 0;
        let raw_num_ids = &mut aia_num_ids as *mut u32;

        let aia_num_ids_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_CONFIG,
            attr: KVM_DEV_RISCV_AIA_CONFIG_IDS,
            addr: raw_num_ids as u64,
            flags: 0,
        };

        // Safe because we allocated the struct that's being passed in, and raw_num_ids is pointing
        // to a uniquely owned local, mutable variable.
        let ret = unsafe { ioctl_with_ref(self, KVM_GET_DEVICE_ATTR, &aia_num_ids_attr) };
        if ret != 0 {
            return errno_result();
        }
        Ok(aia_num_ids)
    }

    fn get_aia_mode(&self) -> Result<u32> {
        let mut aia_mode: u32 = AIA_MODE_HWACCEL;
        let raw_aia_mode = &mut aia_mode as *mut u32;
        let aia_mode_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_CONFIG,
            attr: KVM_DEV_RISCV_AIA_CONFIG_MODE,
            addr: raw_aia_mode as u64,
            flags: 0,
        };
        // Safe because we allocated the struct that's being passed in, and raw_aia_mode is pointing
        // to a uniquely owned local, mutable variable.
        let ret = unsafe { ioctl_with_ref(self, KVM_GET_DEVICE_ATTR, &aia_mode_attr) };
        if ret != 0 {
            return errno_result();
        }
        Ok(aia_mode)
    }

    fn set_num_sources(&self, num_sources: u32) -> Result<()> {
        let raw_num_sources = &num_sources as *const u32;
        let kvm_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_CONFIG,
            attr: KVM_DEV_RISCV_AIA_CONFIG_SRCS,
            addr: raw_num_sources as u64,
            flags: 0,
        };
        // Safe because we allocated the struct that's being passed in, and raw_num_sources is
        // pointing to a uniquely owned local variable.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &kvm_attr) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    fn set_hart_bits(&self, hart_bits: u32) -> Result<()> {
        let raw_hart_bits = &hart_bits as *const u32;
        let kvm_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_CONFIG,
            attr: KVM_DEV_RISCV_AIA_CONFIG_HART_BITS,
            addr: raw_hart_bits as u64,
            flags: 0,
        };
        // Safe because we allocated the struct that's being passed in, and raw_hart_bits is
        // pointing to a uniquely owned local variable.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &kvm_attr) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    fn set_aplic_addrs(&self, num_vcpus: usize) -> Result<()> {
        /* Set AIA device addresses */
        let aplic_addr = aia_aplic_addr(num_vcpus);
        let raw_aplic_addr = &aplic_addr as *const u64;
        let kvm_attr = kvm_device_attr {
            group: KVM_DEV_RISCV_AIA_GRP_ADDR,
            attr: KVM_DEV_RISCV_AIA_ADDR_APLIC,
            addr: raw_aplic_addr as u64,
            flags: 0,
        };
        // Safe because we allocated the struct that's being passed in, and raw_aplic_addr is
        // pointing to a uniquely owned local variable.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &kvm_attr) };
        if ret != 0 {
            return errno_result();
        }
        for i in 0..num_vcpus {
            let imsic_addr = aia_imsic_addr(i);
            let raw_imsic_addr = &imsic_addr as *const u64;
            let kvm_attr = kvm_device_attr {
                group: KVM_DEV_RISCV_AIA_GRP_ADDR,
                attr: aia_addr_imsic(i as u64),
                addr: raw_imsic_addr as u64,
                flags: 0,
            };
            // Safe because we allocated the struct that's being passed in, and raw_imsic_addr is
            // pointing to a uniquely owned local variable.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_DEVICE_ATTR, &kvm_attr) };
            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}

impl AsRawDescriptor for AiaDescriptor {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.0.as_raw_descriptor()
    }
}

/// IrqChip implementation where the entire IrqChip is emulated by KVM.
///
/// This implementation will use the KVM API to create and configure the in-kernel irqchip.
pub struct KvmKernelIrqChip {
    pub(super) vm: KvmVm,
    pub(super) vcpus: Arc<Mutex<Vec<Option<KvmVcpu>>>>,
    num_vcpus: usize,
    num_ids: usize,     // number of imsic ids
    num_sources: usize, // number of aplic sources
    aia: AiaDescriptor,
    device_kind: DeviceKind,
    pub(super) routes: Arc<Mutex<Vec<IrqRoute>>>,
}

impl KvmKernelIrqChip {
    /// Construct a new KvmKernelIrqChip.
    pub fn new(vm: KvmVm, num_vcpus: usize) -> Result<KvmKernelIrqChip> {
        let aia = AiaDescriptor(vm.create_device(DeviceKind::RiscvAia)?);

        let aia_mode = aia.get_aia_mode()?;
        // Only support full emulation in the kernel.
        if aia_mode != AIA_MODE_HWACCEL && aia_mode != AIA_MODE_AUTO {
            return Err(BaseError::new(libc::ENOTSUP));
        }

        // Don't need any wired interrupts; riscv can run PCI/MSI(X) only.
        const NUM_SOURCES: u32 = 0;
        aia.set_num_sources(NUM_SOURCES)?;

        let num_ids = aia.get_num_ids()?;

        // Set the number of bits needed to index this count of harts (e.g. 4 vcpus means hart
        // indices 0..=3, which need 2 bits). At least one bit is required.
        let max_hart_idx = num_vcpus as u64 - 1;
        let num_hart_bits = std::cmp::max(1, 64 - max_hart_idx.leading_zeros());
        aia.set_hart_bits(num_hart_bits)?;

        Ok(KvmKernelIrqChip {
            vm,
            vcpus: Arc::new(Mutex::new((0..num_vcpus).map(|_| None).collect())),
            num_vcpus,
            num_ids: num_ids as usize,
            num_sources: NUM_SOURCES as usize,
            aia,
            device_kind: DeviceKind::RiscvAia,
            routes: Arc::new(Mutex::new(kvm_default_irq_routing_table(
                NUM_SOURCES as usize,
            ))),
        })
    }

    /// Attempt to create a shallow clone of this riscv64 KvmKernelIrqChip instance.
    /// This is the arch-specific impl used by `KvmKernelIrqChip::clone()`.
    pub(super) fn arch_try_clone(&self) -> Result<Self> {
        Ok(KvmKernelIrqChip {
            vm: self.vm.try_clone()?,
            vcpus: self.vcpus.clone(),
            num_vcpus: self.num_vcpus,
            num_ids: self.num_ids,
            num_sources: self.num_sources,
            aia: self.aia.try_clone()?,
            device_kind: self.device_kind,
            routes: self.routes.clone(),
        })
    }
}

impl IrqChipRiscv64 for KvmKernelIrqChip {
    fn try_box_clone(&self) -> Result<Box<dyn IrqChipRiscv64>> {
        Ok(Box::new(self.try_clone()?))
    }

    fn as_irq_chip(&self) -> &dyn IrqChip {
        self
    }

    fn as_irq_chip_mut(&mut self) -> &mut dyn IrqChip {
        self
    }

    fn finalize(&self) -> Result<()> {
        // The kernel needs the number of vcpus finalized before setting up the address for each
        // interrupt controller.
        self.aia.set_aplic_addrs(self.num_vcpus)?;
        self.aia.aia_init()?;
        Ok(())
    }

    fn get_num_ids_sources(&self) -> (usize, usize) {
        (self.num_ids, self.num_sources)
    }
}
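
// Illustrative usage sketch: `KvmKernelIrqChip::new()` configures the AIA device, vcpus are
// then registered with the chip via the arch-common `IrqChip` plumbing, and `finalize()` must
// be called last, once the vcpu count is settled, because `set_aplic_addrs()` depends on it.
// The `vm` value here is assumed to be a `KvmVm` created elsewhere in the VMM setup.
//
//     let chip = KvmKernelIrqChip::new(vm, num_vcpus)?;
//     // ... register vcpus with the chip ...
//     chip.finalize()?;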

/// Default RiscV routing table.
fn kvm_default_irq_routing_table(num_sources: usize) -> Vec<IrqRoute> {
    let mut routes: Vec<IrqRoute> = Vec::new();

    for i in 0..num_sources {
        routes.push(IrqRoute::aia_irq_route(i as u32));
    }

    routes
}
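
// Illustrative sketch: minimal checks of the AIA guest-physical layout helpers above, assuming
// a hypothetical two-hart guest. The expected values follow directly from AIA_IMSIC_BASE and
// the 0x1000-byte per-hart IMSIC page size used by the helpers.
#[cfg(test)]
mod layout_tests {
    use super::*;

    #[test]
    fn imsic_and_aplic_layout_for_two_harts() {
        // One IMSIC page per hart, allocated back to back from AIA_IMSIC_BASE.
        assert_eq!(aia_imsic_addr(0), AIA_IMSIC_BASE);
        assert_eq!(aia_imsic_addr(1), AIA_IMSIC_BASE + 0x1000);
        assert_eq!(aia_imsic_size(2), 0x2000);
        // The APLIC region starts immediately after the last IMSIC page.
        assert_eq!(aia_aplic_addr(2), aia_imsic_addr(0) + aia_imsic_size(2));
        // KVM addr-attribute indices: 0 is the APLIC, and vcpu N's IMSIC uses N + 1.
        assert_eq!(aia_addr_imsic(0), 1);
        assert_eq!(aia_addr_imsic(3), 4);
    }
}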