xref: /aosp_15_r20/bootable/libbootloader/gbl/efi/src/fuchsia_boot.rs (revision 5225e6b173e52d2efc6bcf950c27374fd72adabc)
1 // Copyright 2024, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 use crate::utils::efi_to_zbi_mem_range_type;
16 #[allow(unused_imports)]
17 use crate::{
18     efi_blocks::find_block_devices, fastboot::fastboot, ops::Ops, utils::get_efi_mem_attr,
19 };
20 use core::fmt::Write;
21 use efi::{efi_print, efi_println, EfiEntry, EfiMemoryAttributesTable, EfiMemoryMap};
22 use efi_types::{
23     EfiMemoryAttributesTableHeader, EfiMemoryDescriptor, EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME,
24 };
25 use liberror::Error;
26 use liberror::Error::BufferTooSmall;
27 use libgbl::{
28     fuchsia_boot::{zircon_check_enter_fastboot, zircon_load_verify_abr, zircon_part_name},
29     partition::check_part_unique,
30     IntegrationError::UnificationError,
31     Os, Result,
32 };
33 use safemath::SafeNum;
34 use zbi::{zbi_format::zbi_mem_range_t, ZbiContainer, ZbiFlags, ZbiType};
35 use zerocopy::{ByteSliceMut, Ref};
36 
/// Page size in bytes used to convert EFI `number_of_pages` counts into byte lengths.
const PAGE_SIZE: u64 = 4096;
38 
39 /// Check if the disk GPT layout is a Fuchsia device layout.
is_fuchsia_gpt(efi_entry: &EfiEntry) -> Result<()>40 pub fn is_fuchsia_gpt(efi_entry: &EfiEntry) -> Result<()> {
41     let gpt_devices = find_block_devices(&efi_entry)?;
42     let partitions: &[&[&str]] = &[
43         &["zircon_a", "zircon-a"],
44         &["zircon_b", "zircon-b"],
45         &["zircon_r", "zircon-r"],
46         &["vbmeta_a"],
47         &["vbmeta_b"],
48         &["vbmeta_r"],
49         &["misc", "durable_boot"],
50     ];
51     if !partitions
52         .iter()
53         .all(|&partition| partition.iter().any(|v| check_part_unique(&gpt_devices[..], *v).is_ok()))
54     {
55         return Err(Error::NotFound.into());
56     }
57 
58     Ok(())
59 }
60 
/// Loads, verifies and boots Fuchsia according to A/B/R.
///
/// On the aarch64 and x86 paths this function does not return on success:
/// boot services are exited and control jumps into the Zircon kernel. The
/// riscv64 path is not implemented yet.
pub fn fuchsia_boot_demo(efi_entry: EfiEntry) -> Result<()> {
    efi_println!(efi_entry, "Try booting as Fuchsia/Zircon");

    // Load and verify the kernel/ZBI items for the active A/B/R slot. The
    // block-device handles and `ops` only live inside this scope; the returned
    // buffers own the loaded data.
    let (mut zbi_items_buffer, mut _kernel_buffer, slot) = {
        let blks = find_block_devices(&efi_entry)?;
        let mut ops = Ops::new(&efi_entry, &blks[..], Some(Os::Fuchsia));
        // Checks whether to enter fastboot mode.
        if zircon_check_enter_fastboot(&mut ops) {
            fastboot(&mut ops, &mut [])?;
        }
        zircon_load_verify_abr(&mut ops)?
    };
    efi_println!(efi_entry, "Booting from slot: {}", zircon_part_name(Some(slot)));

    // Underscore-prefixed: only consumed inside the cfg-gated arch blocks below.
    let _zbi_items = zbi_items_buffer.used_mut();

    #[cfg(target_arch = "aarch64")]
    {
        // Uses the unused buffer for `exit_boot_services` to store output memory map.
        // The map is not used for now. We currently rely on UEFI firmware to pass memory map via
        // a raw zbi blob in device tree. Long term we want to support adding from EFI memory maps
        // if none is provided.
        let item_size = zbi::ZbiContainer::parse(&mut _zbi_items[..])?.container_size()?;
        let (_, remains) = _zbi_items.split_at_mut(item_size);
        let _ = efi::exit_boot_services(efi_entry, remains).unwrap();
        // SAFETY: The kernel has passed libavb verification or device is unlocked, in which case we
        // assume the caller has addressed all safety and security concerns.
        unsafe { boot::aarch64::jump_zircon_el2_or_lower(_kernel_buffer.used_mut(), _zbi_items) };
    }

    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    {
        const BUFFER_SIZE: usize = 32 * 1024 / 2;
        let mut mem_map_buf = [0u8; BUFFER_SIZE];
        let mut zbi_items = zbi::ZbiContainer::parse(&mut _zbi_items[..])?;
        // Must be fetched before `exit_boot_services`; the attributes table is
        // an EFI configuration table.
        let efi_memory_attribute_table = get_efi_mem_attr(&efi_entry).ok_or(Error::InvalidInput)?;

        // `exit_boot_services` returns the EFI memory map that is used to derive and append
        // MEM_CONFIG items.
        let efi_memory_map = efi::exit_boot_services(efi_entry, &mut mem_map_buf).unwrap();

        add_memory_items(&efi_memory_map, &efi_memory_attribute_table, &mut zbi_items)?;

        // SAFETY: The kernel has passed libavb verification or device is unlocked, in which case we
        // assume the caller has addressed all safety and security concerns.
        unsafe { boot::x86::zbi_boot(_kernel_buffer.used_mut(), _zbi_items) };
    }

    #[cfg(target_arch = "riscv64")]
    {
        unimplemented!();
    }
}
115 
116 // This function must not use allocation
117 #[allow(unused)]
add_memory_items<B>( efi_memory_map: &EfiMemoryMap, efi_memory_attribute_table: &EfiMemoryAttributesTable, zbi_items: &mut ZbiContainer<B>, ) -> Result<()> where B: ByteSliceMut + PartialEq,118 fn add_memory_items<B>(
119     efi_memory_map: &EfiMemoryMap,
120     efi_memory_attribute_table: &EfiMemoryAttributesTable,
121     zbi_items: &mut ZbiContainer<B>,
122 ) -> Result<()>
123 where
124     B: ByteSliceMut + PartialEq,
125 {
126     generate_efi_memory_attributes_table_item(
127         efi_memory_map,
128         efi_memory_attribute_table,
129         zbi_items,
130     )?;
131 
132     generate_mem_config_item(efi_memory_map, zbi_items)?;
133 
134     Ok(())
135 }
136 
generate_efi_memory_attributes_table_item<'b, B>( efi_memory_map: &EfiMemoryMap<'b>, efi_memory_attribute_table: &EfiMemoryAttributesTable, zbi_items: &mut ZbiContainer<B>, ) -> Result<()> where B: ByteSliceMut + PartialEq,137 fn generate_efi_memory_attributes_table_item<'b, B>(
138     efi_memory_map: &EfiMemoryMap<'b>,
139     efi_memory_attribute_table: &EfiMemoryAttributesTable,
140     zbi_items: &mut ZbiContainer<B>,
141 ) -> Result<()>
142 where
143     B: ByteSliceMut + PartialEq,
144 {
145     let payload = zbi_items.get_next_payload()?;
146     let provided_payload_size = payload.len();
147     let (mut header, mut tail) =
148         Ref::<&mut [u8], EfiMemoryAttributesTableHeader>::new_from_prefix(payload)
149             .ok_or(Error::BadBufferSize)?;
150 
151     for efi_memory_desc in efi_memory_map.into_iter() {
152         if efi_memory_desc.attributes & EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME == 0 {
153             continue;
154         }
155 
156         let mut base = efi_memory_desc.physical_start;
157         let mut size: u64 = (SafeNum::from(efi_memory_desc.number_of_pages) * PAGE_SIZE)
158             .try_into()
159             .map_err(Error::from)?;
160 
161         // This EMAT entry is either a sub-region or a full copy of the memory map region, per
162         // EFI 2.10 4.6.4: "Additionally, every memory region described by a Descriptor in
163         // EFI_MEMORY_ATTRIBUTES_TABLE must be a sub-region of, or equal to, a descriptor in the
164         // table produced by GetMemoryMap()."
165         //
166         // This means that we do not have to consider the case where the EMAT entry only overlaps
167         // the end of the memory map entry.
168         //
169         // EMAT items are ordered by physical address, so once we go past |base| we can quit the
170         // loop.
171         for emat_item in efi_memory_attribute_table
172             .into_iter()
173             .skip_while(move |item| item.physical_start < base)
174             .take_while(move |item| item.physical_start < base + size)
175         {
176             if emat_item.physical_start > base {
177                 // Create a region for [base ... emat_item->PhysicalStart), because that region is
178                 // not covered by the EMAT.
179                 let mut generated_item;
180                 (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
181                     .ok_or(UnificationError(BufferTooSmall(Some(
182                         size_of::<EfiMemoryDescriptor>(),
183                     ))))?;
184 
185                 generated_item.physical_start = base;
186                 generated_item.number_of_pages = (emat_item.physical_start - base) / PAGE_SIZE;
187                 generated_item.virtual_start = 0;
188                 generated_item.attributes = EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME;
189                 generated_item.memory_type = emat_item.memory_type;
190 
191                 // Adjust base and size forward.
192                 size -= emat_item.physical_start - base;
193                 base = emat_item.physical_start;
194             } else {
195                 // emat_item.physical_start == base
196                 // Create a region for [base ... emat_item->NumberOfPages * PAGE_SIZE)
197                 let mut generated_item;
198                 (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
199                     .ok_or(UnificationError(BufferTooSmall(Some(
200                         size_of::<EfiMemoryDescriptor>(),
201                     ))))?;
202                 *generated_item = *emat_item;
203 
204                 // Adjust base and size forward.
205                 base += emat_item.number_of_pages * PAGE_SIZE;
206                 size -= emat_item.number_of_pages * PAGE_SIZE;
207             }
208         }
209 
210         if size != 0 {
211             let mut generated_item;
212             (generated_item, tail) = Ref::<_, EfiMemoryDescriptor>::new_from_prefix(tail)
213                 .ok_or(UnificationError(BufferTooSmall(Some(size_of::<EfiMemoryDescriptor>()))))?;
214 
215             generated_item.physical_start = base;
216             generated_item.number_of_pages = size / PAGE_SIZE;
217             generated_item.virtual_start = 0;
218             generated_item.attributes = EFI_MEMORY_ATTRIBUTE_EMA_RUNTIME;
219             generated_item.memory_type = efi_memory_desc.memory_type;
220         }
221     }
222 
223     let used_payload = provided_payload_size - tail.len();
224     header.descriptor_size = size_of::<EfiMemoryDescriptor>().try_into().map_err(Error::from)?;
225     header.number_of_entries =
226         (used_payload / size_of::<EfiMemoryDescriptor>()).try_into().unwrap();
227     header.reserved = 0;
228     header.version = 1;
229 
230     zbi_items.create_entry(
231         ZbiType::EfiMemoryAttributesTable,
232         0,
233         ZbiFlags::default(),
234         used_payload,
235     )?;
236 
237     Ok(())
238 }
239 
generate_mem_config_item<'b, B>( efi_memory_map: &EfiMemoryMap<'b>, zbi_items: &mut ZbiContainer<B>, ) -> Result<()> where B: ByteSliceMut + PartialEq,240 fn generate_mem_config_item<'b, B>(
241     efi_memory_map: &EfiMemoryMap<'b>,
242     zbi_items: &mut ZbiContainer<B>,
243 ) -> Result<()>
244 where
245     B: ByteSliceMut + PartialEq,
246 {
247     let mut tail = zbi_items.get_next_payload()?;
248     let provided_payload_size = tail.len();
249 
250     for efi_desc in efi_memory_map.into_iter() {
251         let mut zbi_mem_range: Ref<&mut [u8], zbi_mem_range_t>;
252         (zbi_mem_range, tail) = Ref::new_from_prefix(tail)
253             .ok_or(UnificationError(BufferTooSmall(Some(size_of::<zbi_mem_range_t>()))))?;
254         zbi_mem_range.paddr = efi_desc.physical_start;
255         zbi_mem_range.length = efi_desc.number_of_pages * PAGE_SIZE;
256         zbi_mem_range.type_ = efi_to_zbi_mem_range_type(efi_desc.memory_type);
257         zbi_mem_range.reserved = 0;
258     }
259 
260     let used_payload = provided_payload_size - tail.len();
261     zbi_items.create_entry(ZbiType::MemConfig, 0, ZbiFlags::default(), used_payload)?;
262 
263     Ok(())
264 }
265