1 // Copyright 2022, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 //! Low-level entry and exit points of pvmfw.
16
17 use crate::config;
18 use crate::memory;
19 use core::arch::asm;
20 use core::mem::size_of;
21 use core::ops::Range;
22 use core::slice;
23 use log::error;
24 use log::warn;
25 use log::LevelFilter;
26 use vmbase::util::RangeExt as _;
27 use vmbase::{
28 arch::aarch64::min_dcache_line_size,
29 configure_heap, console_writeln, layout, limit_stack_size, main,
30 memory::{
31 deactivate_dynamic_page_tables, map_image_footer, unshare_all_memory,
32 unshare_all_mmio_except_uart, unshare_uart, MemoryTrackerError, SIZE_128KB, SIZE_4KB,
33 },
34 power::reboot,
35 };
36 use zeroize::Zeroize;
37
/// Reason for aborting the pVM boot and rebooting.
///
/// Fieldless and cheap to copy; `PartialEq`/`Eq` allow callers and tests to compare reasons
/// directly instead of matching on variants.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RebootReason {
    /// A malformed BCC was received.
    InvalidBcc,
    /// An invalid configuration was appended to pvmfw.
    InvalidConfig,
    /// An unexpected internal error happened.
    InternalError,
    /// The provided FDT was invalid.
    InvalidFdt,
    /// The provided payload was invalid.
    InvalidPayload,
    /// The provided ramdisk was invalid.
    InvalidRamdisk,
    /// Failed to verify the payload.
    PayloadVerificationError,
    /// DICE layering process failed.
    SecretDerivationError,
}

impl RebootReason {
    /// Returns the AVF-defined string reported to the host over the reboot-reason console.
    pub fn as_avf_reboot_string(&self) -> &'static str {
        match self {
            Self::InvalidBcc => "PVM_FIRMWARE_INVALID_BCC",
            Self::InvalidConfig => "PVM_FIRMWARE_INVALID_CONFIG_DATA",
            Self::InternalError => "PVM_FIRMWARE_INTERNAL_ERROR",
            Self::InvalidFdt => "PVM_FIRMWARE_INVALID_FDT",
            Self::InvalidPayload => "PVM_FIRMWARE_INVALID_PAYLOAD",
            Self::InvalidRamdisk => "PVM_FIRMWARE_INVALID_RAMDISK",
            Self::PayloadVerificationError => "PVM_FIRMWARE_PAYLOAD_VERIFICATION_FAILED",
            Self::SecretDerivationError => "PVM_FIRMWARE_SECRET_DERIVATION_FAILED",
        }
    }
}
72
// vmbase boilerplate: register start() as the image's Rust entry point, reserve a 128 KiB
// heap, and cap the stack at 48 KiB (12 pages of 4 KiB).
main!(start);
configure_heap!(SIZE_128KB);
limit_stack_size!(SIZE_4KB * 12);
76
77 /// Entry point for pVM firmware.
start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64)78 pub fn start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64) {
79 // Limitations in this function:
80 // - can't access non-pvmfw memory (only statically-mapped memory)
81 // - can't access MMIO (except the console, already configured by vmbase)
82
83 match main_wrapper(fdt_address as usize, payload_start as usize, payload_size as usize) {
84 Ok((entry, bcc, keep_uart)) => {
85 jump_to_payload(fdt_address, entry.try_into().unwrap(), bcc, keep_uart)
86 }
87 Err(e) => {
88 const REBOOT_REASON_CONSOLE: usize = 1;
89 console_writeln!(REBOOT_REASON_CONSOLE, "{}", e.as_avf_reboot_string());
90 reboot()
91 }
92 }
93
94 // if we reach this point and return, vmbase::entry::rust_entry() will call power::shutdown().
95 }
96
/// Sets up the environment for main() and wraps its result for start().
///
/// Provide the abstractions necessary for start() to abort the pVM boot and for main() to run with
/// the assumption that its environment has been properly configured.
///
/// Returns, on success, the payload entry address, the range holding the next-stage BCC, and
/// whether the UART must stay shared for the payload (earlycon).
fn main_wrapper(
    fdt: usize,
    payload: usize,
    payload_size: usize,
) -> Result<(usize, Range<usize>, bool), RebootReason> {
    // Limitations in this function:
    // - only access MMIO once (and while) it has been mapped and configured
    // - only perform logging once the logger has been initialized
    // - only access non-pvmfw memory once (and while) it has been mapped

    log::set_max_level(LevelFilter::Info);

    // Map the image footer so the data appended to the pvmfw binary becomes accessible.
    let appended_data = get_appended_data_slice().map_err(|e| {
        error!("Failed to map the appended data: {e}");
        RebootReason::InternalError
    })?;

    // Parse it as a config blob (or, with the "legacy" feature, a raw BCC).
    let appended = AppendedPayload::new(appended_data).ok_or_else(|| {
        error!("No valid configuration found");
        RebootReason::InvalidConfig
    })?;

    let config_entries = appended.get_entries();

    // Validate and map the FDT, kernel and (optional) ramdisk passed by the host.
    let slices = memory::MemorySlices::new(fdt, payload, payload_size)?;

    // This wrapper allows main() to be blissfully ignorant of platform details.
    let (next_bcc, debuggable_payload) = crate::main(
        slices.fdt,
        slices.kernel,
        slices.ramdisk,
        config_entries.bcc,
        config_entries.debug_policy,
        config_entries.vm_dtbo,
        config_entries.vm_ref_dt,
    )?;
    // Keep UART MMIO_GUARD-ed for debuggable payloads, to enable earlycon.
    let keep_uart = cfg!(debuggable_vms_improvements) && debuggable_payload;

    // Writable-dirty regions will be flushed when MemoryTracker is dropped.
    // Wipe the input BCC now that main() has derived next_bcc from it.
    config_entries.bcc.zeroize();

    unshare_all_mmio_except_uart().map_err(|e| {
        error!("Failed to unshare MMIO ranges: {e}");
        RebootReason::InternalError
    })?;
    unshare_all_memory();

    Ok((slices.kernel.as_ptr() as usize, next_bcc, keep_uart))
}
151
/// Wipes pvmfw's secrets from memory, tears down its execution environment and jumps to the
/// payload entry point with the FDT address in x0 — never returns.
///
/// The guest BCC sub-range of .data/.bss is deliberately skipped by the zeroing pass, as it is
/// handed over to the payload.
fn jump_to_payload(fdt_address: u64, payload_start: u64, bcc: Range<usize>, keep_uart: bool) -> ! {
    if !keep_uart {
        unshare_uart().unwrap();
    }

    deactivate_dynamic_page_tables();

    // `stp xzr, xzr` stores 16 bytes, so all zeroed regions must be 16-byte aligned.
    const ASM_STP_ALIGN: usize = size_of::<u64>() * 2;
    const SCTLR_EL1_RES1: u64 = (0b11 << 28) | (0b101 << 20) | (0b1 << 11);
    // Stage 1 instruction access cacheability is unaffected.
    const SCTLR_EL1_I: u64 = 0b1 << 12;
    // SETEND instruction disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_SED: u64 = 0b1 << 8;
    // Various IT instructions are disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_ITD: u64 = 0b1 << 7;

    // Note: the MMU-enable bit (M) is intentionally absent — the payload starts with the MMU off.
    const SCTLR_EL1_VAL: u64 = SCTLR_EL1_RES1 | SCTLR_EL1_ITD | SCTLR_EL1_SED | SCTLR_EL1_I;

    let scratch = layout::data_bss_range();

    assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
    assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
    assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");

    // The BCC must lie inside the scratch region for the two-pass zeroing below to be correct.
    assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
    assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
    assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");

    let stack = layout::stack_range();

    assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
    assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
    assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");

    let eh_stack = layout::eh_stack_range();

    assert_ne!(eh_stack.end - eh_stack.start, 0, "EH stack region is empty.");
    assert_eq!(eh_stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned EH stack region.");
    assert_eq!(eh_stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned EH stack region.");

    // Zero all memory that could hold secrets and that can't be safely written to from Rust.
    // Disable the exception vector, caches and page table and then jump to the payload at the
    // given address, passing it the given FDT pointer.
    //
    // SAFETY: We're exiting pvmfw by passing the register values we need to a noreturn asm!().
    unsafe {
        asm!(
            "cmp {scratch}, {bcc}",
            "b.hs 1f",

            // Zero .data & .bss until BCC.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {bcc}",
            "b.lo 0b",

            "1:",
            // Skip BCC.
            "mov {scratch}, {bcc_end}",
            "cmp {scratch}, {scratch_end}",
            "b.hs 1f",

            // Keep zeroing .data & .bss.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {scratch_end}",
            "b.lo 0b",

            "1:",
            // Flush d-cache over .data & .bss (including BCC).
            // {cache_line} was initialized to the start of the scratch region.
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {scratch_end}",
            "b.lo 0b",

            // Save the stack base in {cache_line} before {stack} is advanced by the zeroing loop.
            "mov {cache_line}, {stack}",
            // Zero stack region.
            "0: stp xzr, xzr, [{stack}], 16",
            "cmp {stack}, {stack_end}",
            "b.lo 0b",

            // Flush d-cache over stack region.
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {stack_end}",
            "b.lo 0b",

            "mov {cache_line}, {eh_stack}",
            // Zero EH stack region.
            "0: stp xzr, xzr, [{eh_stack}], 16",
            "cmp {eh_stack}, {eh_stack_end}",
            "b.lo 0b",

            // Flush d-cache over EH stack region.
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {eh_stack_end}",
            "b.lo 0b",

            "msr sctlr_el1, {sctlr_el1_val}",
            "isb",
            // Scrub all GPRs except x0 (FDT address) and x30 (payload entry point).
            "mov x1, xzr",
            "mov x2, xzr",
            "mov x3, xzr",
            "mov x4, xzr",
            "mov x5, xzr",
            "mov x6, xzr",
            "mov x7, xzr",
            "mov x8, xzr",
            "mov x9, xzr",
            "mov x10, xzr",
            "mov x11, xzr",
            "mov x12, xzr",
            "mov x13, xzr",
            "mov x14, xzr",
            "mov x15, xzr",
            "mov x16, xzr",
            "mov x17, xzr",
            "mov x18, xzr",
            "mov x19, xzr",
            "mov x20, xzr",
            "mov x21, xzr",
            "mov x22, xzr",
            "mov x23, xzr",
            "mov x24, xzr",
            "mov x25, xzr",
            "mov x26, xzr",
            "mov x27, xzr",
            "mov x28, xzr",
            "mov x29, xzr",
            "msr ttbr0_el1, xzr",
            // Ensure that CMOs have completed before entering payload.
            "dsb nsh",
            "br x30",
            sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
            bcc = in(reg) u64::try_from(bcc.start).unwrap(),
            bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
            cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
            scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
            scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
            stack = in(reg) u64::try_from(stack.start.0).unwrap(),
            stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
            eh_stack = in(reg) u64::try_from(eh_stack.start.0).unwrap(),
            eh_stack_end = in(reg) u64::try_from(eh_stack.end.0).unwrap(),
            dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
            in("x0") fdt_address,
            in("x30") payload_start,
            options(noreturn),
        );
    };
}
301
get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError>302 fn get_appended_data_slice() -> Result<&'static mut [u8], MemoryTrackerError> {
303 let range = map_image_footer()?;
304 // SAFETY: This region was just mapped for the first time (as map_image_footer() didn't fail)
305 // and the linker script prevents it from overlapping with other objects.
306 Ok(unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) })
307 }
308
/// Data appended to the pvmfw image, in one of the two supported formats.
enum AppendedPayload<'a> {
    /// Configuration data.
    Config(config::Config<'a>),
    /// Deprecated raw BCC, as used in Android T.
    LegacyBcc(&'a mut [u8]),
}
315
impl<'a> AppendedPayload<'a> {
    /// Parses the appended data as a config blob or, when that fails with InvalidMagic and the
    /// "legacy" feature is enabled, treats the first 4 KiB as a raw BCC. Returns None on any
    /// other parse error.
    fn new(data: &'a mut [u8]) -> Option<Self> {
        // The borrow checker gets confused about the ownership of data (see inline comments) so we
        // intentionally obfuscate it using a raw pointer; see a similar issue (still not addressed
        // in v1.77) in https://users.rust-lang.org/t/78467.
        let data_ptr = data as *mut [u8];

        // Config::new() borrows data as mutable ...
        match config::Config::new(data) {
            // ... so this branch has a mutable reference to data, from the Ok(Config<'a>). But ...
            Ok(valid) => Some(Self::Config(valid)),
            // ... if Config::new(data).is_err(), the Err holds no ref to data. However ...
            Err(config::Error::InvalidMagic) if cfg!(feature = "legacy") => {
                // ... the borrow checker still complains about a second mutable ref without this.
                // SAFETY: Pointer to a valid mut (not accessed elsewhere), 'a lifetime re-used.
                let data: &'a mut _ = unsafe { &mut *data_ptr };

                const BCC_SIZE: usize = SIZE_4KB;
                warn!("Assuming the appended data at {:?} to be a raw BCC", data.as_ptr());
                Some(Self::LegacyBcc(&mut data[..BCC_SIZE]))
            }
            Err(e) => {
                error!("Invalid configuration data at {data_ptr:?}: {e}");
                None
            }
        }
    }

    /// Consumes self and returns the individual configuration entries; for the legacy format,
    /// only the BCC entry is populated.
    fn get_entries(self) -> config::Entries<'a> {
        match self {
            Self::Config(cfg) => cfg.get_entries(),
            Self::LegacyBcc(bcc) => config::Entries { bcc, ..Default::default() },
        }
    }
}
351