// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Validate device assignment written in crosvm DT with VM DTBO, and apply it
//! to platform DT.
//! Declared in a separate library so that unit tests, which require libstd, can
//! be added.

#[cfg(test)]
extern crate alloc;

use alloc::collections::{BTreeMap, BTreeSet};
use alloc::ffi::CString;
use alloc::fmt;
use alloc::vec;
use alloc::vec::Vec;
use core::ffi::CStr;
use core::iter::Iterator;
use core::mem;
use core::ops::Range;
// TODO(b/308694211): Use hypervisor_backends::{DeviceAssigningHypervisor, Error} proper for tests.
#[cfg(not(test))]
use hypervisor_backends::DeviceAssigningHypervisor;
use libfdt::{Fdt, FdtError, FdtNode, FdtNodeMut, Phandle, Reg};
use log::error;
use log::warn;
use zerocopy::byteorder::big_endian::U32;
use zerocopy::FromBytes as _;

// TODO(b/308694211): Use cstr! from vmbase instead.
macro_rules! cstr {
    ($str:literal) => {{
        const S: &str = concat!($str, "\0");
        const C: &::core::ffi::CStr = match ::core::ffi::CStr::from_bytes_with_nul(S.as_bytes()) {
            Ok(v) => v,
            Err(_) => panic!("string contains interior NUL"),
        };
        C
    }};
}
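
// Example (illustrative): `cstr!("pkvm,pviommu")` expands at compile time to a
// `&'static CStr` holding "pkvm,pviommu\0"; a literal with an interior NUL
// would hit the `panic!` branch and fail the build instead of producing an
// invalid CStr.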

// TODO(b/277993056): Keep constants derived from platform.dts in one place.
const CELLS_PER_INTERRUPT: usize = 3; // from /intc node in platform.dts

/// Errors in device assignment.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum DeviceAssignmentError {
    /// Invalid VM DTBO
    InvalidDtbo,
    /// Invalid __symbols__
    InvalidSymbols,
    /// Malformed <reg>. Can't parse.
    MalformedReg,
    /// Missing physical <reg> of assigned device.
    MissingReg(u64, u64),
    /// Extra <reg> of assigned device.
    ExtraReg(u64, u64),
    /// Invalid virtual <reg> of assigned device.
    InvalidReg(u64),
    /// Token for <reg> of assigned device does not match expected value.
    InvalidRegToken(u64, u64),
    /// Invalid virtual <reg> size of assigned device.
    InvalidRegSize(u64, u64),
    /// Invalid <interrupts>
    InvalidInterrupts,
    /// Malformed <iommus>
    MalformedIommus,
    /// Invalid <iommus>
    InvalidIommus,
    /// Invalid phys IOMMU node
    InvalidPhysIommu,
    /// Invalid pvIOMMU node
    InvalidPvIommu,
    /// Too many pvIOMMU nodes
    TooManyPvIommu,
    /// Duplicated phys IOMMU IDs exist
    DuplicatedIommuIds,
    /// Duplicated pvIOMMU IDs exist
    DuplicatedPvIommuIds,
    /// Unsupported path format. Only supports full path.
    UnsupportedPathFormat,
    /// Unsupported overlay target syntax. Only supports <target-path> with full path.
    UnsupportedOverlayTarget,
    /// Unsupported PhysIommu
    UnsupportedPhysIommu,
    /// Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.
    UnsupportedPvIommusDuplication,
    /// Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.
    UnsupportedIommusDuplication,
    /// Internal error
    Internal,
    /// Unexpected error from libfdt
    UnexpectedFdtError(FdtError),
}

impl From<FdtError> for DeviceAssignmentError {
    fn from(e: FdtError) -> Self {
        DeviceAssignmentError::UnexpectedFdtError(e)
    }
}

impl fmt::Display for DeviceAssignmentError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Self::InvalidDtbo => write!(f, "Invalid DTBO"),
            Self::InvalidSymbols => write!(
                f,
                "Invalid property in /__symbols__. Must point to valid assignable device node."
            ),
            Self::MalformedReg => write!(f, "Malformed <reg>. Can't parse"),
            Self::MissingReg(addr, size) => {
                write!(f, "Missing physical MMIO region: addr: {addr:#x}, size: {size:#x}")
            }
            Self::ExtraReg(addr, size) => {
                write!(f, "Unexpected extra MMIO region: addr: {addr:#x}, size: {size:#x}")
            }
            Self::InvalidReg(addr) => {
                write!(f, "Invalid guest MMIO granule (addr: {addr:#x})")
            }
            Self::InvalidRegSize(size, expected) => {
                write!(f, "Unexpected MMIO size ({size:#x}), should be {expected:#x}")
            }
            Self::InvalidRegToken(token, expected) => {
                write!(f, "Unexpected MMIO token ({token:#x}), should be {expected:#x}")
            }
            Self::InvalidInterrupts => write!(f, "Invalid <interrupts>"),
            Self::MalformedIommus => write!(f, "Malformed <iommus>. Can't parse."),
            Self::InvalidIommus => {
                write!(f, "Invalid <iommus>. Failed to validate with hypervisor")
            }
            Self::InvalidPhysIommu => write!(f, "Invalid phys IOMMU node"),
            Self::InvalidPvIommu => write!(f, "Invalid pvIOMMU node"),
            Self::TooManyPvIommu => write!(
                f,
                "Too many pvIOMMU nodes. Insufficient pre-populated pvIOMMUs in platform DT"
            ),
            Self::DuplicatedIommuIds => {
                write!(f, "Duplicated IOMMU IDs exist. IDs must be unique among IOMMU nodes")
            }
            Self::DuplicatedPvIommuIds => {
                write!(f, "Duplicated pvIOMMU IDs exist. IDs must be unique among pvIOMMU nodes")
            }
            Self::UnsupportedPathFormat => {
                write!(f, "Unsupported path format. Only supports full path")
            }
            Self::UnsupportedOverlayTarget => {
                write!(f, "Unsupported overlay target. Only supports 'target-path = \"/\"'")
            }
            Self::UnsupportedPhysIommu => {
                write!(f, "Unsupported phys IOMMU. Currently only supports #iommu-cells = <1>")
            }
            Self::UnsupportedPvIommusDuplication => {
                write!(f, "Unsupported (pvIOMMU id, vSID) duplication. Currently the pair should be unique.")
            }
            Self::UnsupportedIommusDuplication => {
                write!(f, "Unsupported (IOMMU token, SID) duplication. Currently the pair should be unique.")
            }
            Self::Internal => write!(f, "Internal error"),
            Self::UnexpectedFdtError(e) => write!(f, "Unexpected error from libfdt: {e}"),
        }
    }
}

pub type Result<T> = core::result::Result<T, DeviceAssignmentError>;

#[derive(Clone, Default, Ord, PartialOrd, Eq, PartialEq)]
pub struct DtPathTokens<'a> {
    tokens: Vec<&'a [u8]>,
}

impl<'a> fmt::Debug for DtPathTokens<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut list = f.debug_list();
        for token in &self.tokens {
            let mut bytes = token.to_vec();
            bytes.push(b'\0');
            match CString::from_vec_with_nul(bytes) {
                Ok(string) => list.entry(&string),
                Err(_) => list.entry(token),
            };
        }
        list.finish()
    }
}

impl<'a> DtPathTokens<'a> {
    fn new(path: &'a CStr) -> Result<Self> {
        if path.to_bytes().first() != Some(&b'/') {
            return Err(DeviceAssignmentError::UnsupportedPathFormat);
        }
        let tokens: Vec<_> = path
            .to_bytes()
            .split(|char| *char == b'/')
            .filter(|&component| !component.is_empty())
            .collect();
        Ok(Self { tokens })
    }

    fn to_overlay_target_path(&self) -> Result<Self> {
        if !self.is_overlayable_node() {
            return Err(DeviceAssignmentError::InvalidDtbo);
        }
        Ok(Self { tokens: self.tokens.as_slice()[2..].to_vec() })
    }

    fn to_cstring(&self) -> CString {
        if self.tokens.is_empty() {
            // Note: CString::new() rejects inputs containing NUL, so pass "/" without one.
            return CString::new(*b"/").unwrap();
        }

        let size = self.tokens.iter().fold(0, |sum, token| sum + token.len() + 1);
        let mut path = Vec::with_capacity(size + 1);
        for token in &self.tokens {
            path.push(b'/');
            path.extend_from_slice(token);
        }
        path.push(b'\0');

        CString::from_vec_with_nul(path).unwrap()
    }

    fn is_overlayable_node(&self) -> bool {
        self.tokens.get(1) == Some(&&b"__overlay__"[..])
    }
}
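
// Example (illustrative): DtPathTokens::new(cstr!("/fragment@rng/__overlay__/rng"))
// yields tokens ["fragment@rng", "__overlay__", "rng"]. is_overlayable_node() is
// true because the second token is "__overlay__", and to_overlay_target_path()
// drops the first two tokens, so to_cstring() on the result returns "/rng".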

#[derive(Debug, Eq, PartialEq)]
enum DeviceTreeChildrenMask {
    Partial(Vec<DeviceTreeMask>),
    All,
}

#[derive(Eq, PartialEq)]
struct DeviceTreeMask {
    name_bytes: Vec<u8>,
    children: DeviceTreeChildrenMask,
}

impl fmt::Debug for DeviceTreeMask {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name_bytes = [self.name_bytes.as_slice(), b"\0"].concat();

        f.debug_struct("DeviceTreeMask")
            .field("name", &CStr::from_bytes_with_nul(&name_bytes).unwrap())
            .field("children", &self.children)
            .finish()
    }
}

impl DeviceTreeMask {
    fn new() -> Self {
        Self { name_bytes: b"/".to_vec(), children: DeviceTreeChildrenMask::Partial(Vec::new()) }
    }

    fn mask_internal(&mut self, path: &DtPathTokens, leaf_mask: DeviceTreeChildrenMask) -> bool {
        let mut iter = self;
        let mut newly_masked = false;
        'next_token: for path_token in &path.tokens {
            let DeviceTreeChildrenMask::Partial(ref mut children) = &mut iter.children else {
                return false;
            };

            // Note: Can't use iterator for 'get or insert'. (a.k.a. polonius Rust)
            #[allow(clippy::needless_range_loop)]
            for i in 0..children.len() {
                if children[i].name_bytes.as_slice() == *path_token {
                    iter = &mut children[i];
                    newly_masked = false;
                    continue 'next_token;
                }
            }
            let child = Self {
                name_bytes: path_token.to_vec(),
                children: DeviceTreeChildrenMask::Partial(Vec::new()),
            };
            children.push(child);
            newly_masked = true;
            iter = children.last_mut().unwrap()
        }
        iter.children = leaf_mask;
        newly_masked
    }

    fn mask(&mut self, path: &DtPathTokens) -> bool {
        self.mask_internal(path, DeviceTreeChildrenMask::Partial(Vec::new()))
    }

    fn mask_all(&mut self, path: &DtPathTokens) {
        self.mask_internal(path, DeviceTreeChildrenMask::All);
    }
}
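
// Illustrative behaviour (not from the original source): starting from
// DeviceTreeMask::new(), mask()-ing the path "/a/b" creates the chain
// "/" -> "a" -> "b" and returns true; mask()-ing "/a/b" again returns false
// since nothing new was masked. mask_all() sets the leaf to
// DeviceTreeChildrenMask::All, so filter_with_mask() below keeps the whole
// subtree under that node instead of recursing into it.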

/// Represents a VM DTBO.
#[repr(transparent)]
pub struct VmDtbo(Fdt);

impl VmDtbo {
    /// Wraps a mutable slice containing a VM DTBO.
    ///
    /// Fails if the VM DTBO does not pass validation.
    pub fn from_mut_slice(dtbo: &mut [u8]) -> Result<&mut Self> {
        // This validates the DTBO.
        let fdt = Fdt::from_mut_slice(dtbo)?;
        // SAFETY: VmDtbo is a transparent wrapper around Fdt, so the representation is the same.
        Ok(unsafe { mem::transmute::<&mut Fdt, &mut Self>(fdt) })
    }

    // Locates the device node path as if the given dtbo node path were assigned and the VM DTBO
    // were overlaid. For a given dtbo node path, this concatenates the <target-path> of the
    // enclosing fragment and the relative path from the __overlay__ node.
    //
    // Here's an example with a sample VM DTBO:
    //    / {
    //        fragment@rng {
    //            target-path = "/";  // Always 'target-path = "/"'. Disallows <target> or other path.
    //            __overlay__ {
    //                rng { ... };    // Actual device node is here. If overlaid, path would be "/rng"
    //            };
    //        };
    //        __symbols__ {  // Contains list of assignable devices
    //            rng = "/fragment@rng/__overlay__/rng";
    //        };
    //    };
    //
    // Then locate_overlay_target_path(cstr!("/fragment@rng/__overlay__/rng")) is Ok("/rng")
    //
    // Contrary to fdt_overlay_target_offset(), this API enforces the overlay target property
    // 'target-path = "/"', so the overlay doesn't modify and/or append the platform DT's existing
    // nodes and/or properties. The enforcement is for compatibility reasons.
    fn locate_overlay_target_path(
        &self,
        dtbo_node_path: &DtPathTokens,
        dtbo_node: &FdtNode,
    ) -> Result<CString> {
        let fragment_node = dtbo_node.supernode_at_depth(1)?;
        let target_path = fragment_node
            .getprop_str(cstr!("target-path"))?
            .ok_or(DeviceAssignmentError::InvalidDtbo)?;
        if target_path != cstr!("/") {
            return Err(DeviceAssignmentError::UnsupportedOverlayTarget);
        }

        let overlaid_path = dtbo_node_path.to_overlay_target_path()?;
        Ok(overlaid_path.to_cstring())
    }

    fn parse_physical_iommus(physical_node: &FdtNode) -> Result<BTreeMap<Phandle, PhysIommu>> {
        let mut phys_iommus = BTreeMap::new();
        for (node, _) in physical_node.descendants() {
            let Some(phandle) = node.get_phandle()? else {
                continue; // Skips unreachable IOMMU node
            };
            let Some(iommu) = PhysIommu::parse(&node)? else {
                continue; // Skip if not a PhysIommu.
            };
            if phys_iommus.insert(phandle, iommu).is_some() {
                return Err(FdtError::BadPhandle.into());
            }
        }
        Self::validate_physical_iommus(&phys_iommus)?;
        Ok(phys_iommus)
    }

    fn validate_physical_iommus(phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<()> {
        let unique_iommus: BTreeSet<_> = phys_iommus.values().cloned().collect();
        if phys_iommus.len() != unique_iommus.len() {
            return Err(DeviceAssignmentError::DuplicatedIommuIds);
        }
        Ok(())
    }

    fn validate_physical_devices(
        physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
    ) -> Result<()> {
        // Only need to validate iommus because <reg> will be validated together with PV <reg>.
        // See: DeviceAssignmentInfo::validate_all_regs().
        let mut all_iommus = BTreeSet::new();
        for physical_device in physical_devices.values() {
            for iommu in &physical_device.iommus {
                if !all_iommus.insert(iommu) {
                    error!("Unsupported phys IOMMU duplication found, <iommus> = {iommu:?}");
                    return Err(DeviceAssignmentError::UnsupportedIommusDuplication);
                }
            }
        }
        Ok(())
    }

    fn parse_physical_devices_with_iommus(
        physical_node: &FdtNode,
        phys_iommus: &BTreeMap<Phandle, PhysIommu>,
    ) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
        let mut physical_devices = BTreeMap::new();
        for (node, _) in physical_node.descendants() {
            let Some(info) = PhysicalDeviceInfo::parse(&node, phys_iommus)? else {
                continue;
            };
            if physical_devices.insert(info.target, info).is_some() {
                return Err(DeviceAssignmentError::InvalidDtbo);
            }
        }
        Self::validate_physical_devices(&physical_devices)?;
        Ok(physical_devices)
    }

    /// Parses physical devices in the VM DTBO.
    fn parse_physical_devices(&self) -> Result<BTreeMap<Phandle, PhysicalDeviceInfo>> {
        let Some(physical_node) = self.as_ref().node(cstr!("/host"))? else {
            return Ok(BTreeMap::new());
        };

        let phys_iommus = Self::parse_physical_iommus(&physical_node)?;
        Self::parse_physical_devices_with_iommus(&physical_node, &phys_iommus)
    }

    fn node(&self, path: &DtPathTokens) -> Result<Option<FdtNode>> {
        let mut node = self.as_ref().root();
        for token in &path.tokens {
            let Some(subnode) = node.subnode_with_name_bytes(token)? else {
                return Ok(None);
            };
            node = subnode;
        }
        Ok(Some(node))
    }

    fn collect_overlayable_nodes_with_phandle(&self) -> Result<BTreeMap<Phandle, DtPathTokens>> {
        let mut paths = BTreeMap::new();
        let mut path: DtPathTokens = Default::default();
        let root = self.as_ref().root();
        for (node, depth) in root.descendants() {
            path.tokens.truncate(depth - 1);
            path.tokens.push(node.name()?.to_bytes());
            if !path.is_overlayable_node() {
                continue;
            }
            if let Some(phandle) = node.get_phandle()? {
                paths.insert(phandle, path.clone());
            }
        }
        Ok(paths)
    }

    fn collect_phandle_references_from_overlayable_nodes(
        &self,
    ) -> Result<BTreeMap<DtPathTokens, Vec<Phandle>>> {
        const CELL_SIZE: usize = core::mem::size_of::<u32>();

        let vm_dtbo = self.as_ref();

        let mut phandle_map = BTreeMap::new();
        let Some(local_fixups) = vm_dtbo.node(cstr!("/__local_fixups__"))? else {
            return Ok(phandle_map);
        };

        let mut path: DtPathTokens = Default::default();
        for (fixup_node, depth) in local_fixups.descendants() {
            let node_name = fixup_node.name()?;
            path.tokens.truncate(depth - 1);
            path.tokens.push(node_name.to_bytes());
            if path.tokens.len() != depth {
                return Err(DeviceAssignmentError::Internal);
            }
            if !path.is_overlayable_node() {
                continue;
            }
            let target_node = self.node(&path)?.ok_or(DeviceAssignmentError::InvalidDtbo)?;

            let mut phandles = vec![];
            for fixup_prop in fixup_node.properties()? {
                let target_prop = target_node
                    .getprop(fixup_prop.name()?)
                    .or(Err(DeviceAssignmentError::InvalidDtbo))?
                    .ok_or(DeviceAssignmentError::InvalidDtbo)?;
                let fixup_prop_values = fixup_prop.value()?;
                if fixup_prop_values.is_empty() || fixup_prop_values.len() % CELL_SIZE != 0 {
                    return Err(DeviceAssignmentError::InvalidDtbo);
                }

                for fixup_prop_cell in fixup_prop_values.chunks(CELL_SIZE) {
                    let phandle_offset: usize = u32::from_be_bytes(
                        fixup_prop_cell.try_into().or(Err(DeviceAssignmentError::InvalidDtbo))?,
                    )
                    .try_into()
                    .or(Err(DeviceAssignmentError::InvalidDtbo))?;
                    if phandle_offset % CELL_SIZE != 0 {
                        return Err(DeviceAssignmentError::InvalidDtbo);
                    }
                    let phandle_value = target_prop
                        .get(phandle_offset..phandle_offset + CELL_SIZE)
                        .ok_or(DeviceAssignmentError::InvalidDtbo)?;
                    let phandle: Phandle = U32::ref_from(phandle_value)
                        .unwrap()
                        .get()
                        .try_into()
                        .or(Err(DeviceAssignmentError::InvalidDtbo))?;

                    phandles.push(phandle);
                }
            }
            if !phandles.is_empty() {
                phandle_map.insert(path.clone(), phandles);
            }
        }

        Ok(phandle_map)
    }

    fn build_mask(&self, assigned_devices: Vec<DtPathTokens>) -> Result<DeviceTreeMask> {
        if assigned_devices.is_empty() {
            return Err(DeviceAssignmentError::Internal);
        }

        let dependencies = self.collect_phandle_references_from_overlayable_nodes()?;
        let paths = self.collect_overlayable_nodes_with_phandle()?;

        let mut mask = DeviceTreeMask::new();
        let mut stack = assigned_devices;
        while let Some(path) = stack.pop() {
            if !mask.mask(&path) {
                continue;
            }
            let Some(dst_phandles) = dependencies.get(&path) else {
                continue;
            };
            for dst_phandle in dst_phandles {
                let dst_path = paths.get(dst_phandle).ok_or(DeviceAssignmentError::Internal)?;
                stack.push(dst_path.clone());
            }
        }

        Ok(mask)
    }
}

fn filter_dangling_symbols(fdt: &mut Fdt) -> Result<()> {
    if let Some(symbols) = fdt.symbols()? {
        let mut removed = vec![];
        for prop in symbols.properties()? {
            let path = CStr::from_bytes_with_nul(prop.value()?)
                .map_err(|_| DeviceAssignmentError::Internal)?;
            if fdt.node(path)?.is_none() {
                let name = prop.name()?;
                removed.push(CString::from(name));
            }
        }

        let mut symbols = fdt.symbols_mut()?.unwrap();
        for name in removed {
            symbols.nop_property(&name)?;
        }
    }
    Ok(())
}

impl AsRef<Fdt> for VmDtbo {
    fn as_ref(&self) -> &Fdt {
        &self.0
    }
}

impl AsMut<Fdt> for VmDtbo {
    fn as_mut(&mut self) -> &mut Fdt {
        &mut self.0
    }
}

// Filters out any node that isn't masked by the DeviceTreeMask.
fn filter_with_mask(anchor: FdtNodeMut, mask: &DeviceTreeMask) -> Result<()> {
    let mut stack = vec![mask];
    let mut iter = anchor.next_node(0)?;
    while let Some((node, depth)) = iter {
        stack.truncate(depth);
        let parent_mask = stack.last().unwrap();
        let DeviceTreeChildrenMask::Partial(parent_mask_children) = &parent_mask.children else {
            // Shouldn't happen. We only step in if the parent has DeviceTreeChildrenMask::Partial.
            return Err(DeviceAssignmentError::Internal);
        };

        let name = node.as_node().name()?.to_bytes();
        let mask = parent_mask_children.iter().find(|child_mask| child_mask.name_bytes == name);
        if let Some(masked) = mask {
            if let DeviceTreeChildrenMask::Partial(_) = &masked.children {
                // This node is partially masked. Stepping in.
                stack.push(masked);
                iter = node.next_node(depth)?;
            } else {
                // This node is fully masked. Stepping out.
                iter = node.next_node_skip_subnodes(depth)?;
            }
        } else {
            // This node isn't masked.
            iter = node.delete_and_next_node(depth)?;
        }
    }

    Ok(())
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct PvIommu {
    // ID from pvIOMMU node
    id: u32,
}

impl PvIommu {
    fn parse(node: &FdtNode) -> Result<Self> {
        let iommu_cells = node
            .getprop_u32(cstr!("#iommu-cells"))?
            .ok_or(DeviceAssignmentError::InvalidPvIommu)?;
        // Ensures #iommu-cells = <1>. It means that each `<iommus>` entry contains a pair of
        // (pvIOMMU ID, vSID).
        if iommu_cells != 1 {
            return Err(DeviceAssignmentError::InvalidPvIommu);
        }
        let id = node.getprop_u32(cstr!("id"))?.ok_or(DeviceAssignmentError::InvalidPvIommu)?;
        Ok(Self { id })
    }
}
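
// Illustrative pvIOMMU node (not from the original source): PvIommu::parse
// accepts a platform DT node like the following, found via its "pkvm,pviommu"
// compatible by DeviceAssignmentInfo::parse_pviommus():
//
//     pviommu0 {
//         compatible = "pkvm,pviommu";
//         id = <0x4>;
//         #iommu-cells = <1>;
//     };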

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct Vsid(u32);

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct Sid(u64);

impl From<u32> for Sid {
    fn from(sid: u32) -> Self {
        Self(sid.into())
    }
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct DeviceReg {
    addr: u64,
    size: u64,
}

impl DeviceReg {
    pub fn overlaps(&self, range: &Range<u64>) -> bool {
        self.addr < range.end && range.start < self.addr.checked_add(self.size).unwrap()
    }

    pub fn is_aligned(&self, granule: u64) -> bool {
        self.addr % granule == 0 && self.size % granule == 0
    }
}
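
// Example (illustrative): DeviceReg { addr: 0x9, size: 0xFF } spans
// [0x9, 0x108), so overlaps() is true for any range intersecting that span,
// and is_aligned(0x1000) is false since neither the address nor the size is a
// multiple of the granule; validate_reg() below currently only warns about
// such unaligned regions.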

impl TryFrom<Reg<u64>> for DeviceReg {
    type Error = DeviceAssignmentError;

    fn try_from(reg: Reg<u64>) -> Result<Self> {
        Ok(Self { addr: reg.addr, size: reg.size.ok_or(DeviceAssignmentError::MalformedReg)? })
    }
}

fn parse_node_reg(node: &FdtNode) -> Result<Vec<DeviceReg>> {
    node.reg()?
        .ok_or(DeviceAssignmentError::MalformedReg)?
        .map(DeviceReg::try_from)
        .collect::<Result<Vec<_>>>()
}

fn to_be_bytes(reg: &[DeviceReg]) -> Vec<u8> {
    let mut reg_cells = vec![];
    for x in reg {
        reg_cells.extend_from_slice(&x.addr.to_be_bytes());
        reg_cells.extend_from_slice(&x.size.to_be_bytes());
    }
    reg_cells
}

#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
struct PhysIommu {
    token: u64,
}

impl PhysIommu {
    fn parse(node: &FdtNode) -> Result<Option<Self>> {
        let Some(token) = node.getprop_u64(cstr!("android,pvmfw,token"))? else {
            return Ok(None);
        };
        let Some(iommu_cells) = node.getprop_u32(cstr!("#iommu-cells"))? else {
            return Err(DeviceAssignmentError::InvalidPhysIommu);
        };
        // Currently only supports #iommu-cells = <1>.
        // In that case each `<iommus>` entry contains a pair of (pIOMMU phandle, SID token).
        if iommu_cells != 1 {
            return Err(DeviceAssignmentError::UnsupportedPhysIommu);
        }
        Ok(Some(Self { token }))
    }
}
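
// Illustrative phys IOMMU node (not from the original source): PhysIommu::parse
// recognizes a node under /host in the VM DTBO by its "android,pvmfw,token"
// property (a u64, i.e. two cells), e.g.:
//
//     iommu0 {
//         android,pvmfw,token = <0x0 0x12E40000>;
//         #iommu-cells = <1>;
//     };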

#[derive(Debug)]
struct PhysicalDeviceInfo {
    target: Phandle,
    reg: Vec<DeviceReg>,
    iommus: Vec<(PhysIommu, Sid)>,
}

impl PhysicalDeviceInfo {
    fn parse_iommus(
        node: &FdtNode,
        phys_iommus: &BTreeMap<Phandle, PhysIommu>,
    ) -> Result<Vec<(PhysIommu, Sid)>> {
        let mut iommus = vec![];
        let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
            return Ok(iommus);
        };
        while let Some(cell) = cells.next() {
            // Parse the pIOMMU phandle
            let phandle =
                Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
            let iommu = phys_iommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;

            // Parse the SID
            let Some(cell) = cells.next() else {
                return Err(DeviceAssignmentError::MalformedIommus);
            };

            iommus.push((*iommu, Sid::from(cell)));
        }
        Ok(iommus)
    }

    fn parse(node: &FdtNode, phys_iommus: &BTreeMap<Phandle, PhysIommu>) -> Result<Option<Self>> {
        let Some(phandle) = node.getprop_u32(cstr!("android,pvmfw,target"))? else {
            return Ok(None);
        };
        let target = Phandle::try_from(phandle)?;
        let reg = parse_node_reg(node)?;
        let iommus = Self::parse_iommus(node, phys_iommus)?;
        Ok(Some(Self { target, reg, iommus }))
    }
}
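
// Illustrative physical device node (not from the original source): a node
// under /host carrying "android,pvmfw,target", a phandle pointing back at the
// assignable node inside the overlay fragment (the label name and sizes here
// are hypothetical):
//
//     rng {
//         reg = <0x0 0x12F00000 0x0 0x1000>;
//         iommus = <&iommu0 0x3>;
//         android,pvmfw,target = <&overlay_rng_label>;
//     };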

/// Assigned device information parsed from the crosvm DT.
/// Keeps everything as owned data because the underlying FDT will be reused for the platform DT.
#[derive(Debug, Eq, PartialEq)]
struct AssignedDeviceInfo {
    // Node path of the assigned device (e.g. "/rng")
    node_path: CString,
    // <reg> property from the crosvm DT
    reg: Vec<DeviceReg>,
    // <interrupts> property from the crosvm DT
    interrupts: Vec<u8>,
    // Parsed <iommus> property from the crosvm DT. Tuple of PvIommu and vSID.
    iommus: Vec<(PvIommu, Vsid)>,
}

impl AssignedDeviceInfo {
    fn validate_reg(
        device_reg: &[DeviceReg],
        physical_device_reg: &[DeviceReg],
        hypervisor: &dyn DeviceAssigningHypervisor,
        granule: usize,
    ) -> Result<()> {
        let mut virt_regs = device_reg.iter();
        let mut phys_regs = physical_device_reg.iter();
        // TODO(b/308694211): Move this constant to vmbase::layout once vmbase is std-compatible.
        const PVMFW_RANGE: Range<u64> = 0x7fc0_0000..0x8000_0000;

        // PV reg and physical reg should have a 1:1 match, in order.
        for (reg, phys_reg) in virt_regs.by_ref().zip(phys_regs.by_ref()) {
            if !reg.is_aligned(granule.try_into().unwrap()) {
                let DeviceReg { addr, size } = reg;
                warn!("Assigned region ({addr:#x}, {size:#x}) not aligned to {granule:#x}");
                // TODO(ptosi): Fix our test data so that we can return Err(...);
            }
            if reg.overlaps(&PVMFW_RANGE) {
                return Err(DeviceAssignmentError::InvalidReg(reg.addr));
            }
            if reg.size != phys_reg.size {
                return Err(DeviceAssignmentError::InvalidRegSize(reg.size, phys_reg.size));
            }
            for offset in (0..reg.size).step_by(granule) {
                let expected_token = phys_reg.addr + offset;
                // If this call returns successfully, the hypervisor has mapped the MMIO granule.
                let token = hypervisor.get_phys_mmio_token(reg.addr + offset).map_err(|e| {
                    error!("Hypervisor error while requesting MMIO token: {e}");
                    DeviceAssignmentError::InvalidReg(reg.addr)
                })?;
                if token != expected_token {
                    return Err(DeviceAssignmentError::InvalidRegToken(token, expected_token));
                }
            }
        }

        if let Some(DeviceReg { addr, size }) = virt_regs.next() {
            return Err(DeviceAssignmentError::ExtraReg(*addr, *size));
        }

        if let Some(DeviceReg { addr, size }) = phys_regs.next() {
            return Err(DeviceAssignmentError::MissingReg(*addr, *size));
        }

        Ok(())
    }

    fn parse_interrupts(node: &FdtNode) -> Result<Vec<u8>> {
        // Validation: check that the number of <interrupts> cells is a multiple of
        // CELLS_PER_INTERRUPT, since we can't know in advance how many interrupts there will be.
        let interrupts_cells = node
            .getprop_cells(cstr!("interrupts"))?
            .ok_or(DeviceAssignmentError::InvalidInterrupts)?
            .count();
        if interrupts_cells % CELLS_PER_INTERRUPT != 0 {
            return Err(DeviceAssignmentError::InvalidInterrupts);
        }

        // Once validated, keep the raw bytes so patching can be done with setprop()
        Ok(node.getprop(cstr!("interrupts")).unwrap().unwrap().into())
    }
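
    // Example (illustrative): with CELLS_PER_INTERRUPT = 3 (from the /intc
    // node), `interrupts = <0x0 0xF 0x4>;` describes exactly one interrupt, as
    // in the test data below; a value whose cell count isn't a multiple of 3
    // is rejected as InvalidInterrupts.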

    // TODO(b/277993056): Also validate /__local_fixups__ to ensure that <iommus> has phandle.
    fn parse_iommus(
        node: &FdtNode,
        pviommus: &BTreeMap<Phandle, PvIommu>,
    ) -> Result<Vec<(PvIommu, Vsid)>> {
        let mut iommus = vec![];
        let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? else {
            return Ok(iommus);
        };
        while let Some(cell) = cells.next() {
            // Parse the pvIOMMU phandle
            let phandle =
                Phandle::try_from(cell).or(Err(DeviceAssignmentError::MalformedIommus))?;
            let pviommu = pviommus.get(&phandle).ok_or(DeviceAssignmentError::MalformedIommus)?;

            // Parse the vSID
            let Some(cell) = cells.next() else {
                return Err(DeviceAssignmentError::MalformedIommus);
            };
            let vsid = Vsid(cell);

            iommus.push((*pviommu, vsid));
        }
        Ok(iommus)
    }

    fn validate_iommus(
        iommus: &[(PvIommu, Vsid)],
        physical_device_iommu: &[(PhysIommu, Sid)],
        hypervisor: &dyn DeviceAssigningHypervisor,
    ) -> Result<()> {
        if iommus.len() != physical_device_iommu.len() {
            return Err(DeviceAssignmentError::InvalidIommus);
        }
        // pvIOMMUs can be reordered, and the hypervisor may not guarantee a 1:1 mapping,
        // so we need to mark what has been matched so far.
        let mut physical_device_iommu = physical_device_iommu.to_vec();
        for (pviommu, vsid) in iommus {
            let (id, sid) =
                hypervisor.get_phys_iommu_token(pviommu.id.into(), vsid.0.into()).map_err(|e| {
                    error!("Hypervisor error while requesting IOMMU token ({pviommu:?}, {vsid:?}): {e}");
                    DeviceAssignmentError::InvalidIommus
                })?;

            let pos = physical_device_iommu
                .iter()
                .position(|(phys_iommu, phys_sid)| (phys_iommu.token, phys_sid.0) == (id, sid));
            match pos {
                Some(pos) => physical_device_iommu.remove(pos),
                None => {
                    error!("Failed to validate device <iommus>. No matching phys IOMMU or duplicated mapping for pviommu={pviommu:?}, vsid={vsid:?}");
                    return Err(DeviceAssignmentError::InvalidIommus);
                }
            };
        }
        Ok(())
    }

    fn parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        dtbo_node_path: &DtPathTokens,
        physical_devices: &BTreeMap<Phandle, PhysicalDeviceInfo>,
        pviommus: &BTreeMap<Phandle, PvIommu>,
        hypervisor: &dyn DeviceAssigningHypervisor,
        granule: usize,
    ) -> Result<Option<Self>> {
        let dtbo_node =
            vm_dtbo.node(dtbo_node_path)?.ok_or(DeviceAssignmentError::InvalidSymbols)?;
        let node_path = vm_dtbo.locate_overlay_target_path(dtbo_node_path, &dtbo_node)?;

        let Some(node) = fdt.node(&node_path)? else { return Ok(None) };

        // Currently can only assign devices backed by physical devices.
        let phandle = dtbo_node.get_phandle()?.ok_or(DeviceAssignmentError::InvalidDtbo)?;
        let Some(physical_device) = physical_devices.get(&phandle) else {
            // If the labeled DT node isn't backed by a physical device node, just return None.
            // It's not an error because such a node can be a dependency of assignable device nodes.
            return Ok(None);
        };

        let reg = parse_node_reg(&node)?;
        Self::validate_reg(&reg, &physical_device.reg, hypervisor, granule)?;

        let interrupts = Self::parse_interrupts(&node)?;

        let iommus = Self::parse_iommus(&node, pviommus)?;
        Self::validate_iommus(&iommus, &physical_device.iommus, hypervisor)?;

        Ok(Some(Self { node_path, reg, interrupts, iommus }))
    }

    fn patch(&self, fdt: &mut Fdt, pviommu_phandles: &BTreeMap<PvIommu, Phandle>) -> Result<()> {
        let mut dst = fdt.node_mut(&self.node_path)?.unwrap();
        dst.setprop(cstr!("reg"), &to_be_bytes(&self.reg))?;
        dst.setprop(cstr!("interrupts"), &self.interrupts)?;
        let mut iommus = Vec::with_capacity(8 * self.iommus.len());
        for (pviommu, vsid) in &self.iommus {
            let phandle = pviommu_phandles.get(pviommu).unwrap();
            iommus.extend_from_slice(&u32::from(*phandle).to_be_bytes());
            iommus.extend_from_slice(&vsid.0.to_be_bytes());
        }
        dst.setprop(cstr!("iommus"), &iommus)?;

        Ok(())
    }
}

#[derive(Debug, Eq, PartialEq)]
pub struct DeviceAssignmentInfo {
    pviommus: BTreeSet<PvIommu>,
    assigned_devices: Vec<AssignedDeviceInfo>,
    vm_dtbo_mask: DeviceTreeMask,
}

impl DeviceAssignmentInfo {
    const PVIOMMU_COMPATIBLE: &'static CStr = cstr!("pkvm,pviommu");

    /// Parses pvIOMMUs in fdt
    // Note: This will validate pvIOMMU ids' uniqueness, even when unassigned.
    fn parse_pviommus(fdt: &Fdt) -> Result<BTreeMap<Phandle, PvIommu>> {
        let mut pviommus = BTreeMap::new();
        for compatible in fdt.compatible_nodes(Self::PVIOMMU_COMPATIBLE)? {
            let Some(phandle) = compatible.get_phandle()? else {
                continue; // Skips unreachable pvIOMMU node
            };
            let pviommu = PvIommu::parse(&compatible)?;
            if pviommus.insert(phandle, pviommu).is_some() {
                return Err(FdtError::BadPhandle.into());
            }
        }
        Ok(pviommus)
    }

    fn validate_pviommu_topology(assigned_devices: &[AssignedDeviceInfo]) -> Result<()> {
        let mut all_iommus = BTreeSet::new();
        for assigned_device in assigned_devices {
            for iommu in &assigned_device.iommus {
                if !all_iommus.insert(iommu) {
                    error!("Unsupported pvIOMMU duplication found, <iommus> = {iommu:?}");
                    return Err(DeviceAssignmentError::UnsupportedPvIommusDuplication);
                }
            }
        }
        Ok(())
    }

    // TODO(b/308694211): Remove this workaround for visibility once using
    // vmbase::hyp::DeviceAssigningHypervisor for tests.
    #[cfg(test)]
    fn parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        hypervisor: &dyn DeviceAssigningHypervisor,
        granule: usize,
    ) -> Result<Option<Self>> {
        Self::internal_parse(fdt, vm_dtbo, hypervisor, granule)
    }

    #[cfg(not(test))]
    /// Parses fdt and vm_dtbo, and creates a new DeviceAssignmentInfo
    // TODO(b/277993056): Parse __local_fixups__
    // TODO(b/277993056): Parse __fixups__
    pub fn parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        hypervisor: &dyn DeviceAssigningHypervisor,
        granule: usize,
    ) -> Result<Option<Self>> {
        Self::internal_parse(fdt, vm_dtbo, hypervisor, granule)
    }

    fn internal_parse(
        fdt: &Fdt,
        vm_dtbo: &VmDtbo,
        hypervisor: &dyn DeviceAssigningHypervisor,
        granule: usize,
    ) -> Result<Option<Self>> {
        let Some(symbols_node) = vm_dtbo.as_ref().symbols()? else {
            // /__symbols__ should contain all assignable devices.
            // If empty, then nothing can be assigned.
            return Ok(None);
        };

        let pviommus = Self::parse_pviommus(fdt)?;
        let unique_pviommus: BTreeSet<_> = pviommus.values().cloned().collect();
        if pviommus.len() != unique_pviommus.len() {
            return Err(DeviceAssignmentError::DuplicatedPvIommuIds);
        }

        let physical_devices = vm_dtbo.parse_physical_devices()?;

        let mut assigned_devices = vec![];
        let mut assigned_device_paths = vec![];
        for symbol_prop in symbols_node.properties()? {
            let symbol_prop_value = symbol_prop.value()?;
            let dtbo_node_path = CStr::from_bytes_with_nul(symbol_prop_value)
                .or(Err(DeviceAssignmentError::InvalidSymbols))?;
            let dtbo_node_path = DtPathTokens::new(dtbo_node_path)?;
            if !dtbo_node_path.is_overlayable_node() {
                continue;
            }
            let assigned_device = AssignedDeviceInfo::parse(
                fdt,
                vm_dtbo,
                &dtbo_node_path,
                &physical_devices,
                &pviommus,
                hypervisor,
                granule,
            )?;
            if let Some(assigned_device) = assigned_device {
                assigned_devices.push(assigned_device);
                assigned_device_paths.push(dtbo_node_path);
            }
        }
        if assigned_devices.is_empty() {
            return Ok(None);
        }

        Self::validate_pviommu_topology(&assigned_devices)?;

        let mut vm_dtbo_mask = vm_dtbo.build_mask(assigned_device_paths)?;
        vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__local_fixups__"))?);
        vm_dtbo_mask.mask_all(&DtPathTokens::new(cstr!("/__symbols__"))?);

        // Note: Any node without __overlay__ will be ignored by fdt_apply_overlay,
        // so it doesn't need to be filtered.

        Ok(Some(Self { pviommus: unique_pviommus, assigned_devices, vm_dtbo_mask }))
    }

    /// Filters the VM DTBO to only contain the information necessary for booting the pVM.
    pub fn filter(&self, vm_dtbo: &mut VmDtbo) -> Result<()> {
        let vm_dtbo = vm_dtbo.as_mut();

        // Filter unused references in /__local_fixups__
        if let Some(local_fixups) = vm_dtbo.node_mut(cstr!("/__local_fixups__"))? {
            filter_with_mask(local_fixups, &self.vm_dtbo_mask)?;
        }

        // Filter unused nodes in the rest of the tree
        let root = vm_dtbo.root_mut();
        filter_with_mask(root, &self.vm_dtbo_mask)?;

        filter_dangling_symbols(vm_dtbo)
    }

    fn patch_pviommus(&self, fdt: &mut Fdt) -> Result<BTreeMap<PvIommu, Phandle>> {
        let mut compatible = fdt.root_mut().next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        let mut pviommu_phandles = BTreeMap::new();

        for pviommu in &self.pviommus {
            let mut node = compatible.ok_or(DeviceAssignmentError::TooManyPvIommu)?;
            let phandle = node.as_node().get_phandle()?.ok_or(DeviceAssignmentError::Internal)?;
            node.setprop_inplace(cstr!("id"), &pviommu.id.to_be_bytes())?;
            if pviommu_phandles.insert(*pviommu, phandle).is_some() {
                return Err(DeviceAssignmentError::Internal);
            }
            compatible = node.next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        }

        // Filters pre-populated but unassigned pvIOMMUs.
        while let Some(filtered_pviommu) = compatible {
            compatible = filtered_pviommu.delete_and_next_compatible(Self::PVIOMMU_COMPATIBLE)?;
        }

        Ok(pviommu_phandles)
    }

    pub fn patch(&self, fdt: &mut Fdt) -> Result<()> {
        let pviommu_phandles = self.patch_pviommus(fdt)?;

        // Patches assigned devices
        for device in &self.assigned_devices {
            device.patch(fdt, &pviommu_phandles)?;
        }

        // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
        filter_dangling_symbols(fdt)
    }
}
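
// Typical flow (illustrative sketch, mirroring the unit tests below):
//
//     let info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, granule)?;
//     if let Some(info) = info {
//         info.filter(vm_dtbo)?;
//         // SAFETY: the damaged VM DTBO must not be used after this block.
//         unsafe { platform_dt.apply_overlay(vm_dtbo.as_mut())? };
//         info.patch(platform_dt)?;
//     }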

/// Cleans the device tree so that it contains no pre-populated nodes/props for device assignment.
pub fn clean(fdt: &mut Fdt) -> Result<()> {
    let mut compatible = fdt.root_mut().next_compatible(cstr!("pkvm,pviommu"))?;
    // Filters pre-populated pvIOMMUs
    while let Some(filtered_pviommu) = compatible {
        compatible = filtered_pviommu.delete_and_next_compatible(cstr!("pkvm,pviommu"))?;
    }

    // Removes any dangling references in __symbols__ (e.g. removed pvIOMMUs)
    filter_dangling_symbols(fdt)
}

#[cfg(test)]
#[derive(Clone, Copy, Debug)]
enum MockHypervisorError {
    FailedGetPhysMmioToken,
    FailedGetPhysIommuToken,
}

#[cfg(test)]
type MockHypervisorResult<T> = core::result::Result<T, MockHypervisorError>;

#[cfg(test)]
impl fmt::Display for MockHypervisorError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            MockHypervisorError::FailedGetPhysMmioToken => {
                write!(f, "Failed to get physical MMIO token")
            }
            MockHypervisorError::FailedGetPhysIommuToken => {
                write!(f, "Failed to get physical IOMMU token")
            }
        }
    }
}

#[cfg(test)]
trait DeviceAssigningHypervisor {
    /// Returns MMIO token.
    fn get_phys_mmio_token(&self, base_ipa: u64) -> MockHypervisorResult<u64>;

    /// Returns DMA token as a tuple of (phys_iommu_id, phys_sid).
    fn get_phys_iommu_token(&self, pviommu_id: u64, vsid: u64) -> MockHypervisorResult<(u64, u64)>;
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloc::collections::{BTreeMap, BTreeSet};
    use dts::Dts;
    use std::fs;
    use std::path::Path;

    const VM_DTBO_FILE_PATH: &str = "test_pvmfw_devices_vm_dtbo.dtbo";
    const VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH: &str =
        "test_pvmfw_devices_vm_dtbo_without_symbols.dtbo";
    const VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH: &str =
        "test_pvmfw_devices_vm_dtbo_with_duplicated_iommus.dtbo";
    const VM_DTBO_WITH_DEPENDENCIES_FILE_PATH: &str =
        "test_pvmfw_devices_vm_dtbo_with_dependencies.dtbo";
    const FDT_WITHOUT_IOMMUS_FILE_PATH: &str = "test_pvmfw_devices_without_iommus.dtb";
    const FDT_WITHOUT_DEVICE_FILE_PATH: &str = "test_pvmfw_devices_without_device.dtb";
    const FDT_FILE_PATH: &str = "test_pvmfw_devices_with_rng.dtb";
    const FDT_WITH_DEVICE_OVERLAPPING_PVMFW: &str = "test_pvmfw_devices_overlapping_pvmfw.dtb";
    const FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH: &str =
        "test_pvmfw_devices_with_multiple_devices_iommus.dtb";
    const FDT_WITH_IOMMU_SHARING: &str = "test_pvmfw_devices_with_iommu_sharing.dtb";
    const FDT_WITH_IOMMU_ID_CONFLICT: &str = "test_pvmfw_devices_with_iommu_id_conflict.dtb";
    const FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH: &str =
        "test_pvmfw_devices_with_duplicated_pviommus.dtb";
    const FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH: &str =
        "test_pvmfw_devices_with_multiple_reg_iommus.dtb";
    const FDT_WITH_DEPENDENCY_FILE_PATH: &str = "test_pvmfw_devices_with_dependency.dtb";
    const FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
        "test_pvmfw_devices_with_multiple_dependencies.dtb";
    const FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str = "test_pvmfw_devices_with_dependency_loop.dtb";

    const EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH: &str = "expected_dt_with_dependency.dtb";
    const EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH: &str =
        "expected_dt_with_multiple_dependencies.dtb";
    const EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH: &str =
        "expected_dt_with_dependency_loop.dtb";

    // TODO(b/308694211): Use vmbase::SIZE_4KB.
    const SIZE_4KB: usize = 4 << 10;

    #[derive(Debug, Default)]
    struct MockHypervisor {
        mmio_tokens: BTreeMap<(u64, u64), u64>,
        iommu_tokens: BTreeMap<(u64, u64), (u64, u64)>,
    }

    impl MockHypervisor {
        // TODO(ptosi): Improve these tests to cover multi-page devices.
        fn get_mmio_token(&self, addr: u64) -> Option<&u64> {
            // We currently only have single (or sub-) page MMIO test data so can ignore sizes.
            let key = self.mmio_tokens.keys().find(|(virt, _)| *virt == addr)?;
            self.mmio_tokens.get(key)
        }
    }

    impl DeviceAssigningHypervisor for MockHypervisor {
        fn get_phys_mmio_token(&self, base_ipa: u64) -> MockHypervisorResult<u64> {
            let token = self.get_mmio_token(base_ipa);

            Ok(*token.ok_or(MockHypervisorError::FailedGetPhysMmioToken)?)
        }

        fn get_phys_iommu_token(
            &self,
            pviommu_id: u64,
            vsid: u64,
        ) -> MockHypervisorResult<(u64, u64)> {
            let token = self.iommu_tokens.get(&(pviommu_id, vsid));

            Ok(*token.ok_or(MockHypervisorError::FailedGetPhysIommuToken)?)
        }
    }

    #[derive(Debug, Eq, PartialEq)]
    struct AssignedDeviceNode {
        path: CString,
        reg: Vec<u8>,
        interrupts: Vec<u8>,
        iommus: Vec<u32>, // pvIOMMU id and vSID
    }

    impl AssignedDeviceNode {
        fn parse(fdt: &Fdt, path: &CStr) -> Result<Self> {
            let Some(node) = fdt.node(path)? else {
                return Err(FdtError::NotFound.into());
            };

            let reg = node.getprop(cstr!("reg"))?.ok_or(DeviceAssignmentError::MalformedReg)?;
            let interrupts = node
                .getprop(cstr!("interrupts"))?
                .ok_or(DeviceAssignmentError::InvalidInterrupts)?;
            let mut iommus = vec![];
            if let Some(mut cells) = node.getprop_cells(cstr!("iommus"))? {
                while let Some(pviommu_id) = cells.next() {
                    // pvIOMMU id
                    let phandle = Phandle::try_from(pviommu_id)?;
                    let pviommu = fdt
                        .node_with_phandle(phandle)?
                        .ok_or(DeviceAssignmentError::MalformedIommus)?;
                    let compatible = pviommu.getprop_str(cstr!("compatible"));
                    if compatible != Ok(Some(cstr!("pkvm,pviommu"))) {
                        return Err(DeviceAssignmentError::MalformedIommus);
                    }
                    let id = pviommu
                        .getprop_u32(cstr!("id"))?
                        .ok_or(DeviceAssignmentError::MalformedIommus)?;
                    iommus.push(id);

                    // vSID
                    let Some(vsid) = cells.next() else {
                        return Err(DeviceAssignmentError::MalformedIommus);
                    };
                    iommus.push(vsid);
                }
            }
            Ok(Self { path: path.into(), reg: reg.into(), interrupts: interrupts.into(), iommus })
        }
    }

    fn collect_pviommus(fdt: &Fdt) -> Result<Vec<u32>> {
        let mut pviommus = BTreeSet::new();
        for pviommu in fdt.compatible_nodes(cstr!("pkvm,pviommu"))? {
            if let Ok(Some(id)) = pviommu.getprop_u32(cstr!("id")) {
                pviommus.insert(id);
            }
        }
        Ok(pviommus.iter().cloned().collect())
    }

    fn into_fdt_prop(native_bytes: Vec<u32>) -> Vec<u8> {
        let mut v = Vec::with_capacity(native_bytes.len() * 4);
        for byte in native_bytes {
            v.extend_from_slice(&byte.to_be_bytes());
        }
        v
    }

    impl From<[u64; 2]> for DeviceReg {
        fn from(fdt_cells: [u64; 2]) -> Self {
            DeviceReg { addr: fdt_cells[0], size: fdt_cells[1] }
        }
    }

    // TODO(ptosi): Add tests with varying HYP_GRANULE values.

    #[test]
    fn device_info_new_without_symbols() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITHOUT_SYMBOLS_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor: MockHypervisor = Default::default();
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap();
        assert_eq!(device_info, None);
    }

    #[test]
    fn device_info_new_without_device() {
        let mut fdt_data = fs::read(FDT_WITHOUT_DEVICE_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor: MockHypervisor = Default::default();
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap();
        assert_eq!(device_info, None);
    }

    #[test]
    fn device_info_assigned_info_without_iommus() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();

        let expected = [AssignedDeviceInfo {
            node_path: CString::new("/bus0/backlight").unwrap(),
            reg: vec![[0x9, 0xFF].into()],
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![],
        }];

        assert_eq!(device_info.assigned_devices, expected);
    }

    #[test]
    fn device_info_assigned_info() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();

        let expected = [AssignedDeviceInfo {
            node_path: CString::new("/rng").unwrap(),
            reg: vec![[0x9, 0xFF].into()],
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![(PvIommu { id: 0x4 }, Vsid(0xFF0))],
        }];

        assert_eq!(device_info.assigned_devices, expected);
    }

    #[test]
    fn device_info_filter() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        let vm_dtbo = vm_dtbo.as_mut();

        let symbols = vm_dtbo.symbols().unwrap().unwrap();

        let rng = vm_dtbo.node(cstr!("/fragment@0/__overlay__/rng")).unwrap();
        assert_ne!(rng, None);
        let rng_symbol = symbols.getprop_str(cstr!("rng")).unwrap();
        assert_eq!(Some(cstr!("/fragment@0/__overlay__/rng")), rng_symbol);

        let light = vm_dtbo.node(cstr!("/fragment@0/__overlay__/light")).unwrap();
        assert_eq!(light, None);
        let light_symbol = symbols.getprop_str(cstr!("light")).unwrap();
        assert_eq!(None, light_symbol);

        let led = vm_dtbo.node(cstr!("/fragment@0/__overlay__/led")).unwrap();
        assert_eq!(led, None);
        let led_symbol = symbols.getprop_str(cstr!("led")).unwrap();
        assert_eq!(None, led_symbol);

        let backlight = vm_dtbo.node(cstr!("/fragment@0/__overlay__/bus0/backlight")).unwrap();
        assert_eq!(backlight, None);
        let backlight_symbol = symbols.getprop_str(cstr!("backlight")).unwrap();
        assert_eq!(None, backlight_symbol);
    }

    #[test]
    fn device_info_patch() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let rng_node = platform_dt.node(cstr!("/bus0/backlight")).unwrap().unwrap();
        let phandle = rng_node.getprop_u32(cstr!("phandle")).unwrap();
        assert_ne!(None, phandle);

        // Note: Intentionally not using AssignedDeviceNode for matching all props.
        type FdtResult<T> = libfdt::Result<T>;
        let expected: Vec<(FdtResult<&CStr>, FdtResult<Vec<u8>>)> = vec![
            (Ok(cstr!("android,backlight,ignore-gctrl-reset")), Ok(Vec::new())),
            (Ok(cstr!("compatible")), Ok(Vec::from(*b"android,backlight\0"))),
            (Ok(cstr!("interrupts")), Ok(into_fdt_prop(vec![0x0, 0xF, 0x4]))),
            (Ok(cstr!("iommus")), Ok(Vec::new())),
            (Ok(cstr!("phandle")), Ok(into_fdt_prop(vec![phandle.unwrap()]))),
            (Ok(cstr!("reg")), Ok(into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]))),
        ];

        let mut properties: Vec<_> = rng_node
            .properties()
            .unwrap()
            .map(|prop| (prop.name(), prop.value().map(|x| x.into())))
            .collect();
        properties.sort_by(|a, b| {
            let lhs = a.0.unwrap_or_default();
            let rhs = b.0.unwrap_or_default();
            lhs.partial_cmp(rhs).unwrap()
        });

        assert_eq!(properties, expected);
    }
1471
    #[test]
    fn device_info_patch_no_pviommus() {
        let mut fdt_data = fs::read(FDT_WITHOUT_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let mut data = vec![0_u8; fdt_data.len() + vm_dtbo_data.len()];
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let platform_dt = Fdt::create_empty_tree(data.as_mut_slice()).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu")).unwrap();
        assert_eq!(None, compatible);

        if let Some(symbols) = platform_dt.symbols().unwrap() {
            for prop in symbols.properties().unwrap() {
                let path = CStr::from_bytes_with_nul(prop.value().unwrap()).unwrap();
                assert_ne!(None, platform_dt.node(path).unwrap());
            }
        }
    }

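    // An assigned device with a pvIOMMU should end up with the expected reg, interrupts, and
    // iommus properties, and exactly one pvIOMMU node should remain.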
    #[test]
    fn device_info_overlay_iommu() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected = AssignedDeviceNode {
            path: CString::new("/rng").unwrap(),
            reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
            interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
            iommus: vec![0x4, 0xFF0],
        };

        let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
        assert_eq!(node, Ok(expected));

        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4]));
    }

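    // Multiple assigned devices, each with their own pvIOMMUs and multiple reg entries, should
    // all be patched into the platform DT.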
    #[test]
    fn device_info_multiple_devices_iommus() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEVICES_IOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [
                ((0x9, 0xFF), 0x12F00000),
                ((0x10000, 0x1000), 0xF00000),
                ((0x20000, 0x1000), 0xF10000),
            ]
            .into(),
            iommu_tokens: [
                ((0x4, 0xFF0), (0x12E40000, 3)),
                ((0x40, 0xFFA), (0x40000, 0x4)),
                ((0x50, 0xFFB), (0x50000, 0x5)),
            ]
            .into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected_devices = [
            AssignedDeviceNode {
                path: CString::new("/rng").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
                iommus: vec![0x4, 0xFF0],
            },
            AssignedDeviceNode {
                path: CString::new("/light").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x10000, 0x0, 0x1000, 0x0, 0x20000, 0x0, 0x1000]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
                iommus: vec![0x40, 0xFFA, 0x50, 0xFFB],
            },
        ];

        for expected in expected_devices {
            let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
            assert_eq!(node, Ok(expected));
        }
        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4, 0x40, 0x50]));
    }

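    // Two devices sharing one pvIOMMU (same ID, distinct vSIDs) should both be patched, with a
    // single shared pvIOMMU node remaining.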
    #[test]
    fn device_info_iommu_sharing() {
        let mut fdt_data = fs::read(FDT_WITH_IOMMU_SHARING).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000), ((0x1000, 0x9), 0x12000000)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 3)), ((0x4, 0xFF1), (0x12E40000, 9))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected_devices = [
            AssignedDeviceNode {
                path: CString::new("/rng").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x9, 0x0, 0xFF]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x4]),
                iommus: vec![0x4, 0xFF0],
            },
            AssignedDeviceNode {
                path: CString::new("/led").unwrap(),
                reg: into_fdt_prop(vec![0x0, 0x1000, 0x0, 0x9]),
                interrupts: into_fdt_prop(vec![0x0, 0xF, 0x5]),
                iommus: vec![0x4, 0xFF1],
            },
        ];

        for expected in expected_devices {
            let node = AssignedDeviceNode::parse(platform_dt, &expected.path);
            assert_eq!(node, Ok(expected));
        }

        let pviommus = collect_pviommus(platform_dt);
        assert_eq!(pviommus, Ok(vec![0x4]));
    }

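    // Conflicting pvIOMMU IDs in the VM DT should be rejected as DuplicatedPvIommuIds.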
    #[test]
    fn device_info_iommu_id_conflict() {
        let mut fdt_data = fs::read(FDT_WITH_IOMMU_ID_CONFLICT).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x300)].into(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
    }

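    // A <reg> with no matching MMIO token from the hypervisor should be rejected.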
    #[test]
    fn device_info_invalid_reg() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: BTreeMap::new(),
            iommu_tokens: [((0x4, 0xFF0), (0x12E40000, 0x3))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x9)));
    }

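    // MMIO tokens returned out of order across a device's <reg> entries should be rejected.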
    #[test]
    fn device_info_invalid_reg_out_of_order() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xF000, 0x1000), 0xF10000), ((0xF100, 0x1000), 0xF00000)].into(),
            iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x50000, 0x5))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidRegToken(0xF10000, 0xF00000)));
    }

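    // An <iommus> entry with no matching IOMMU token from the hypervisor should be rejected.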
    #[test]
    fn device_info_invalid_iommus() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x9, 0xFF), 0x12F00000)].into(),
            iommu_tokens: BTreeMap::new(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
    }

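    // Duplicated pvIOMMU IDs should be rejected.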
    #[test]
    fn device_info_duplicated_pv_iommus() {
        let mut fdt_data = fs::read(FDT_WITH_DUPLICATED_PVIOMMUS_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::DuplicatedPvIommuIds));
    }

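    // A VM DTBO with duplicated physical IOMMUs should be rejected.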
    #[test]
    fn device_info_duplicated_iommus() {
        let mut fdt_data = fs::read(FDT_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DUPLICATED_IOMMUS_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x10000, 0x1000), 0xF00000), ((0x20000, 0xFF), 0xF10000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::UnsupportedIommusDuplication));
    }

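    // Two (pvIOMMU, vSID) pairs mapping to the same physical (token, SID) should be rejected.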
    #[test]
    fn device_info_duplicated_iommu_mapping() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_REG_IOMMU_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xF000, 0x1000), 0xF00000), ((0xF100, 0x1000), 0xF10000)].into(),
            iommu_tokens: [((0xFF0, 0xF0), (0x40000, 0x4)), ((0xFF1, 0xF1), (0x40000, 0x4))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidIommus));
    }

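    // A device whose MMIO region overlaps pvmfw should be rejected.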
    #[test]
    fn device_info_overlaps_pvmfw() {
        let mut fdt_data = fs::read(FDT_WITH_DEVICE_OVERLAPPING_PVMFW).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0x7fee0000, 0x1000), 0xF00000)].into(),
            iommu_tokens: [((0xFF, 0xF), (0x40000, 0x4))].into(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info = DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE);

        assert_eq!(device_info, Err(DeviceAssignmentError::InvalidReg(0x7fee0000)));
    }

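    // clean() should remove every "pkvm,pviommu" node from the platform DT.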
    #[test]
    fn device_assignment_clean() {
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
        assert_ne!(None, compatible.unwrap());

        clean(platform_dt).unwrap();

        let compatible = platform_dt.root().next_compatible(cstr!("pkvm,pviommu"));
        assert_eq!(Ok(None), compatible);
    }

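    // A dependency of an assigned device should be carried over into the platform DT.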
    #[test]
    fn device_info_dependency() {
        let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF000, 0x1), 0xF000)].into(),
            iommu_tokens: Default::default(),
        };

        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected = Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }

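    // Multiple assigned devices with dependencies should produce the expected platform DT.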
    #[test]
    fn device_info_multiple_dependencies() {
        let mut fdt_data = fs::read(FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF000, 0x1), 0xF000), ((0xFF100, 0x1), 0xF100)].into(),
            iommu_tokens: Default::default(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected =
            Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_MULTIPLE_DEPENDENCIES_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }

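    // A dependency cycle in the VM DTBO should still be handled, producing the expected
    // platform DT.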
    #[test]
    fn device_info_dependency_loop() {
        let mut fdt_data = fs::read(FDT_WITH_DEPENDENCY_LOOP_FILE_PATH).unwrap();
        let mut vm_dtbo_data = fs::read(VM_DTBO_WITH_DEPENDENCIES_FILE_PATH).unwrap();
        let fdt = Fdt::from_mut_slice(&mut fdt_data).unwrap();
        let vm_dtbo = VmDtbo::from_mut_slice(&mut vm_dtbo_data).unwrap();
        let mut platform_dt_data = pvmfw_fdt_template::RAW.to_vec();
        platform_dt_data.resize(pvmfw_fdt_template::RAW.len() * 2, 0);
        let platform_dt = Fdt::from_mut_slice(&mut platform_dt_data).unwrap();
        platform_dt.unpack().unwrap();

        let hypervisor = MockHypervisor {
            mmio_tokens: [((0xFF200, 0x1), 0xF200)].into(),
            iommu_tokens: Default::default(),
        };
        const HYP_GRANULE: usize = SIZE_4KB;
        let device_info =
            DeviceAssignmentInfo::parse(fdt, vm_dtbo, &hypervisor, HYP_GRANULE).unwrap().unwrap();
        device_info.filter(vm_dtbo).unwrap();

        // SAFETY: Damaged VM DTBO wouldn't be used after this unsafe block.
        unsafe {
            platform_dt.apply_overlay(vm_dtbo.as_mut()).unwrap();
        }
        device_info.patch(platform_dt).unwrap();

        let expected =
            Dts::from_dtb(Path::new(EXPECTED_FDT_WITH_DEPENDENCY_LOOP_FILE_PATH)).unwrap();
        let platform_dt = Dts::from_fdt(platform_dt).unwrap();

        assert_eq!(expected, platform_dt);
    }
}