1 // Copyright 2023, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 use crate::{BlockIo, Disk, Result};
16 use core::{
17 array::from_fn,
18 cmp::min,
19 convert::TryFrom,
20 default::Default,
21 fmt::{Debug, Formatter},
22 mem::size_of,
23 num::NonZeroU64,
24 ops::{Deref, DerefMut},
25 str::from_utf8,
26 };
27 use crc32fast::Hasher;
28 use gbl_async::block_on;
29 use liberror::{Error, GptError};
30 use safemath::SafeNum;
31 use zerocopy::{AsBytes, ByteSlice, FromBytes, FromZeroes, Ref};
32
/// Number of bytes in GUID.
pub const GPT_GUID_LEN: usize = 16;
/// The maximum number of UTF-16 characters in a GPT partition name, including termination.
pub const GPT_NAME_LEN_U16: usize = 36;
/// Maximum number of UTF-8 bytes a decoded partition name can occupy: a UTF-16 string of N code
/// units decodes to at most 2N UTF-8 bytes (see `GptEntry::name_to_str`).
///
/// Bug fix: this was previously derived from `GPT_GUID_LEN` (2 * 16 = 32 bytes), which made the
/// decode buffer in `Partition::new` too small, so any partition name longer than 32 UTF-8 bytes
/// silently failed to decode (`Partition::name()` returned `None`). The correct bound is
/// 2 * GPT_NAME_LEN_U16 = 72 bytes.
const GPT_NAME_LEN_U8: usize = 2 * GPT_NAME_LEN_U16;
38
39 /// The top-level GPT header.
/// The top-level GPT header.
///
/// This mirrors the on-disk layout byte for byte (`repr(C, packed)`); field order and sizes are
/// ABI and must not be changed. It is read/written directly from disk via `zerocopy`.
///
/// NOTE(review): no byte-swapping is performed anywhere in this file, so the code assumes a
/// little-endian host (GPT on-disk fields are little-endian) — confirm for any big-endian target.
#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, AsBytes, FromBytes, FromZeroes, PartialEq, Eq)]
pub struct GptHeader {
    /// Magic bytes; must be [GPT_MAGIC].
    pub magic: u64,
    /// Header version.
    pub revision: u32,
    /// Header size in bytes.
    pub size: u32,
    /// CRC of the first `size` bytes, calculated with this field zeroed.
    pub crc32: u32,
    /// Reserved; must be set to 0.
    pub reserved0: u32,
    /// The on-disk block location of this header.
    pub current: u64,
    /// The on-disk block location of the other header.
    pub backup: u64,
    /// First usable block for partition contents.
    pub first: u64,
    /// Last usable block for partition contents (inclusive).
    pub last: u64,
    /// Disk GUID.
    pub guid: [u8; GPT_GUID_LEN],
    /// Starting block for the partition entries array.
    pub entries: u64,
    /// Number of partition entries.
    pub entries_count: u32,
    /// The size of each partition entry in bytes.
    pub entries_size: u32,
    /// CRC of the partition entries array.
    pub entries_crc: u32,
}
72
impl GptHeader {
    /// Casts a bytes slice into a mutable GptHeader structure.
    ///
    /// # Panics
    ///
    /// Panics if `bytes` is shorter than `size_of::<GptHeader>()`.
    pub fn from_bytes_mut(bytes: &mut [u8]) -> &mut GptHeader {
        Ref::<_, GptHeader>::new_from_prefix(bytes).unwrap().0.into_mut()
    }

    /// Computes the actual crc32 value.
    ///
    /// Hashes all header bytes with the `crc32` field itself substituted by zeroes, as the GPT
    /// format requires.
    fn calculate_header_crc(&self) -> u32 {
        let mut hasher = Hasher::new();
        // Bytes preceding the `crc32` field.
        hasher.update(&self.as_bytes()[..GPT_CRC32_OFFSET]);
        // The `crc32` field is treated as zero while computing the checksum.
        hasher.update(&[0u8; size_of::<u32>()]);
        // Bytes following the `crc32` field.
        hasher.update(&self.as_bytes()[GPT_CRC32_OFFSET + size_of::<u32>()..]);
        hasher.finalize()
    }

    /// Update the header crc32 value.
    pub fn update_crc(&mut self) {
        self.crc32 = self.calculate_header_crc();
    }

    /// Updates entries and header crc according to the given entries buffer.
    ///
    /// Only the first `entries_count * entries_size` bytes of `entries` are covered by the
    /// entries CRC; the header CRC is then recomputed since `entries_crc` is part of the header.
    fn update_entries_crc(&mut self, entries: &[u8]) {
        let size = SafeNum::from(self.entries_count) * self.entries_size;
        self.entries_crc = crc32(&entries[..size.try_into().unwrap()]);
        self.update_crc();
    }
}
100
101 /// Computes the number of blocks for the 128 partition entries reserved space in GPT.
gpt_entries_blk(block_size: u64) -> Result<u64>102 fn gpt_entries_blk(block_size: u64) -> Result<u64> {
103 let size = u64::try_from(GPT_MAX_NUM_ENTRIES_SIZE).unwrap();
104 match size % block_size {
105 0 => Ok(size / block_size),
106 _ => Err(Error::InvalidInput),
107 }
108 }
109
/// Checks a header against a block device.
///
/// # Args
///
/// * `io`: An implementation of [BlockIo],
/// * `header`: The GPT header to verify.
/// * `is_primary`: If the header is a primary header.
fn check_header(io: &mut impl BlockIo, header: &GptHeader, is_primary: bool) -> Result<()> {
    let num_blks = SafeNum::from(io.info().num_blocks);
    let blk_sz = io.info().block_size;

    // GPT spec requires that at least 128 entries worth of space be reserved.
    let min_reserved_entries_blk = gpt_entries_blk(blk_sz)?;
    // Minimum space needed: 2 * (header + entries) + MBR.
    let min_disk_blks: u64 = ((min_reserved_entries_blk + 1) * 2 + 1).try_into().unwrap();
    if min_disk_blks > u64::try_from(num_blks).unwrap() {
        return Err(Error::GptError(GptError::DiskTooSmall));
    }

    if header.magic != GPT_MAGIC {
        return Err(Error::GptError(GptError::IncorrectMagic(header.magic)));
    }

    // CRC must be validated before trusting any of the remaining fields.
    if header.calculate_header_crc() != header.crc32 {
        return Err(Error::GptError(GptError::IncorrectHeaderCrc));
    }

    if header.size != size_of::<GptHeader>().try_into().unwrap() {
        return Err(Error::GptError(GptError::UnexpectedHeaderSize {
            actual: header.size,
            expect: size_of::<GptHeader>(),
        }));
    }

    if header.entries_size != size_of::<GptEntry>().try_into().unwrap() {
        return Err(Error::GptError(GptError::UnexpectedEntrySize {
            actual: header.entries_size,
            expect: size_of::<GptEntry>(),
        }));
    }

    // Checks first/last usable block.
    //
    // Assuming maximum range where partition entries are adjacent to GPT headers.
    //
    // Should leave a minimum space for MBR + primary header + primary entries before.
    let min_first: u64 = (min_reserved_entries_blk + 2).try_into().unwrap();
    // Should leave a minimum space for secondary header + secondary entries space after.
    let max_last: u64 = (num_blks - 1 - min_reserved_entries_blk - 1).try_into().unwrap();
    // `first > last + 1` (rather than `first > last`) permits a zero-size usable range.
    if header.first > header.last + 1 || header.first < min_first || header.last > max_last {
        return Err(Error::GptError(GptError::InvalidFirstLastUsableBlock {
            first: header.first,
            last: header.last,
            range: (min_first, max_last),
        }));
    }

    // Checks entries starting block.
    if is_primary {
        // For primary header, entries must be before first usable block and can hold up to
        // `GPT_MAX_NUM_ENTRIES` entries
        let right: u64 =
            (SafeNum::from(header.first) - min_reserved_entries_blk).try_into().unwrap();
        // Block 0 is the MBR, block 1 the primary header; entries start at block 2 at the
        // earliest.
        if !(header.entries >= 2 && header.entries <= right) {
            return Err(Error::GptError(GptError::InvalidPrimaryEntriesStart {
                value: header.entries,
                expect_range: (2, right),
            }));
        }
    } else {
        // For secondary header, entries must be after last usable block and can hold up to
        // `GPT_MAX_NUM_ENTRIES` entries.
        if !(header.entries > header.last && header.entries <= max_last + 1) {
            return Err(Error::GptError(GptError::InvalidSecondaryEntriesStart {
                value: header.entries,
                expect_range: (header.last + 1, max_last + 1),
            }));
        }
    }

    if header.entries_count > GPT_MAX_NUM_ENTRIES.try_into().unwrap() {
        return Err(Error::GptError(GptError::NumberOfEntriesOverflow {
            entries: header.entries_count,
            max_allowed: GPT_MAX_NUM_ENTRIES,
        }));
    }

    Ok(())
}
199
/// Verifies the given entries against a verified GPT header.
///
/// # Args
///
/// * `header`: The verified GPT header corresponding to the entries.
/// * `entries`: The buffer containing the entries.
fn check_entries(header: &GptHeader, entries: &[u8]) -> Result<()> {
    // Checks entries CRC.
    // `header` must already have passed `check_header()`, which bounds `entries_count`.
    assert!(header.entries_count <= GPT_MAX_NUM_ENTRIES.try_into().unwrap());
    let entries_size: usize =
        (SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE).try_into().unwrap();
    let entries = entries.get(..entries_size).ok_or(Error::GptError(GptError::EntriesTruncated))?;
    if header.entries_crc != crc32(entries) {
        return Err(Error::GptError(GptError::IncorrectEntriesCrc));
    }

    // Checks each entry.
    let entries = Ref::<_, [GptEntry]>::new_slice(entries)
        .ok_or(Error::GptError(GptError::EntriesTruncated))?
        .into_slice();
    let entries = &entries[..header.entries_count.try_into().unwrap()];
    // Only entries before the first NULL entry are considered; the rest are ignored.
    for (idx, ele) in entries.iter().take_while(|v| !v.is_null()).enumerate() {
        // Error information uses 1-base partition index.
        let idx = idx.checked_add(1).unwrap();
        let (first, last) = (ele.first, ele.last);
        // `first > last + 1` permits zero-size partitions (`first == last + 1`).
        if first > last + 1 || last > header.last || first < header.first {
            return Err(Error::GptError(GptError::InvalidPartitionRange {
                idx,
                part_range: (first, last),
                usable_range: (header.first, header.last),
            }));
        } else if ele.part_type == [0u8; GPT_GUID_LEN] {
            return Err(Error::GptError(GptError::ZeroPartitionTypeGUID { idx }));
        } else if ele.guid == [0u8; GPT_GUID_LEN] {
            return Err(Error::GptError(GptError::ZeroPartitionUniqueGUID { idx }));
        }
    }

    // Checks overlap between partition ranges.
    // Sorts an index array because we don't want to modify input.
    let mut sorted_indices: [u8; GPT_MAX_NUM_ENTRIES] = from_fn(|i| i.try_into().unwrap());
    // NULL or out-of-range entries sort to the end via the `u64::MAX` key.
    sorted_indices.sort_unstable_by_key(|v| match entries.get(usize::try_from(*v).unwrap()) {
        Some(v) if !v.is_null() => v.first,
        _ => u64::MAX,
    });

    // `actual` is the number of used (non-NULL) entries.
    let actual = entries.iter().position(|v| v.is_null()).unwrap_or(entries.len());
    if actual > 1 {
        // After sorting by `first`, adjacent pairs suffice for overlap detection.
        for i in 0..actual - 1 {
            let prev: usize = sorted_indices[i].try_into().unwrap();
            let next: usize = sorted_indices[i + 1].try_into().unwrap();
            if entries[prev].last >= entries[next].first {
                return Err(Error::GptError(GptError::PartitionRangeOverlap {
                    prev: (prev + 1, entries[prev].first, entries[prev].last),
                    next: (next + 1, entries[next].first, entries[next].last),
                }));
            }
        }
    }

    Ok(())
}
262
/// GptEntry is the partition entry data structure in the GPT.
///
/// Mirrors the on-disk layout (`repr(C, packed)`, 128 bytes); field order is ABI and must not
/// change. Read/written directly from disk via `zerocopy`.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, AsBytes, FromBytes, FromZeroes, PartialEq)]
pub struct GptEntry {
    /// Partition type GUID.
    pub part_type: [u8; GPT_GUID_LEN],
    /// Unique partition GUID.
    pub guid: [u8; GPT_GUID_LEN],
    /// First block.
    pub first: u64,
    /// Last block (inclusive).
    pub last: u64,
    /// Partition flags.
    pub flags: u64,
    /// Partition name in UTF-16.
    pub name: [u16; GPT_NAME_LEN_U16],
}
280
impl GptEntry {
    /// Return the partition entry size in blocks.
    ///
    /// # Errors
    ///
    /// Returns an error if the computation overflows (e.g. `first > last + 1`).
    pub fn blocks(&self) -> Result<u64> {
        // Must perform "+1" first before subtracting `self.first`. Otherwise if partition size is
        // zero, where `self.first > self.last`, arithmetic will overflow.
        u64::try_from(SafeNum::from(self.last) + 1 - self.first).map_err(Into::into)
    }

    /// Return whether this is a `NULL` entry. The first null entry marks the end of the partition
    /// entries.
    fn is_null(&self) -> bool {
        self.first == 0 && self.last == 0
    }

    /// Decode the partition name into a string. A length N utf16 string can be at most 2N utf8
    /// bytes. Therefore, a safe size of `buffer` is 2*GPT_NAME_LEN_U16 = 72.
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidInput` if `buffer` is too small to hold the decoded name.
    pub fn name_to_str<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a str> {
        let mut index = 0;
        // Invalid UTF-16 sequences are replaced rather than rejected; decoding stops at the
        // first NUL terminator.
        for c in char::decode_utf16(self.name) {
            match c.unwrap_or(char::REPLACEMENT_CHARACTER) {
                '\0' => break,
                c if c.len_utf8() <= buffer[index..].len() => {
                    index += c.encode_utf8(&mut buffer[index..]).len()
                }
                _ => return Err(Error::InvalidInput), // Not enough space in `buffer`.
            }
        }
        // SAFETY:
        // _unchecked should be OK here since we wrote each utf8 byte ourselves,
        // but it's just an optimization, checked version would be fine also.
        unsafe { Ok(core::str::from_utf8_unchecked(&buffer[..index])) }
    }

    /// Checks if the partition name is the same as the given.
    pub fn match_name(&self, part: &str) -> Result<bool> {
        // 2 * GPT_NAME_LEN_U16 bytes is always large enough, so `name_to_str` only fails on
        // genuinely undecodable input.
        Ok(self.name_to_str(&mut [0u8; GPT_NAME_LEN_U16 * 2][..])? == part)
    }
}
319
320 impl core::fmt::Display for GptEntry {
fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result321 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
322 // Format: partition name: "abc", [first, last]: [123, 456]
323 let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
324 let name = self.name_to_str(&mut name_conversion_buffer).map_err(|_| core::fmt::Error)?;
325 // Note: The bracket around `{ self.first }` is for forcing a copy of the field because
326 // GptEntry is a packed structure.
327 write!(f, "partition: \"{}\", first: {}, last: {}", name, { self.first }, { self.last })
328 }
329 }
330
// core::mem::offset_of!(GptHeader, crc32) is unstable feature and rejected by the compiler in our
// settings. We pre-compute the value here.
// Offset of `crc32` within `GptHeader`: magic (8) + revision (4) + size (4) = 16 bytes.
const GPT_CRC32_OFFSET: usize = 16;
// Size of one on-disk partition entry (128 bytes).
const GPT_ENTRY_SIZE: usize = size_of::<GptEntry>();
// GPT spec requires space reserved for at least 128 entries.
const GPT_MAX_NUM_ENTRIES: usize = 128;
// Total byte size of the reserved entries area (128 * 128 = 16384 bytes).
const GPT_MAX_NUM_ENTRIES_SIZE: usize = GPT_MAX_NUM_ENTRIES * GPT_ENTRY_SIZE;
/// GPT header magic bytes ("EFI PART" in ASCII).
pub const GPT_MAGIC: u64 = 0x5452415020494645;
339
/// Identifies which of the two GPT copies an operation targets.
enum HeaderType {
    /// The primary GPT header, located at block 1 (after the MBR).
    Primary,
    /// The secondary (backup) GPT header, located at the last block of the disk.
    Secondary,
}
344
/// `Partition` contains information about a GPT partition.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Partition {
    // The raw GPT entry backing this partition.
    entry: GptEntry,
    // Block size of the disk this entry came from; used for byte offset/size computations.
    block_size: u64,
    // Name pre-decoded to UTF-8 at construction: (buffer, decoded length). `None` if decoding
    // failed.
    decoded_name: Option<([u8; GPT_NAME_LEN_U8], usize)>,
}
352
impl Partition {
    /// Creates a new instance.
    ///
    /// The partition name is eagerly decoded to UTF-8 so `name()` can return a borrowed `&str`.
    fn new(entry: GptEntry, block_size: u64) -> Self {
        let mut buf = [0u8; GPT_NAME_LEN_U8];
        let decoded_name = match entry.name_to_str(&mut buf[..]).ok().map(|v| v.len()) {
            Some(len) => Some((buf, len)),
            _ => None,
        };
        Self { entry, block_size, decoded_name }
    }

    /// Gets the decoded partition name.
    ///
    /// Returns `None` if the name could not be decoded at construction.
    pub fn name(&self) -> Option<&str> {
        // Correct by construction. `from_utf8` should not fail.
        self.decoded_name.as_ref().map(|(buf, sz)| from_utf8(&buf[..*sz]).unwrap())
    }

    /// Returns the partition size in bytes.
    pub fn size(&self) -> Result<u64> {
        u64::try_from(SafeNum::from(self.entry.blocks()?) * self.block_size).map_err(Error::from)
    }

    /// Returns the block size of this partition.
    pub fn block_size(&self) -> u64 {
        self.block_size
    }

    /// Returns the partition entry structure in the GPT header.
    pub fn gpt_entry(&self) -> &GptEntry {
        &self.entry
    }

    /// Returns the partition's absolute start/end offset in number of bytes.
    ///
    /// The end offset is exclusive (`last` is an inclusive block index, hence the `+ 1`).
    pub fn absolute_range(&self) -> Result<(u64, u64)> {
        let start = SafeNum::from(self.entry.first) * self.block_size;
        let end = (SafeNum::from(self.entry.last) + 1) * self.block_size;
        Ok((start.try_into()?, end.try_into()?))
    }

    /// Checks a given sub range and returns its absolute offset.
    ///
    /// # Errors
    ///
    /// Returns `Error::BadIndex` if `[off, off + size)` exceeds the partition size.
    pub fn check_range(&self, off: u64, size: u64) -> Result<u64> {
        let off = SafeNum::from(off);
        let end: u64 = (off + size).try_into()?;
        match end > self.size()? {
            true => Err(Error::BadIndex(end as usize)),
            _ => Ok((off + self.absolute_range()?.0).try_into()?),
        }
    }
}
402
/// `PartitionIterator` iterates all GPT partition entries.
pub struct PartitionIterator<'a> {
    // Entries slice; iteration stops at the first NULL entry.
    entries: &'a [GptEntry],
    // Disk block size, propagated into each yielded `Partition`.
    block_size: u64,
    // Index of the next entry to yield.
    idx: usize,
}
409
410 impl Iterator for PartitionIterator<'_> {
411 type Item = Partition;
412
next(&mut self) -> Option<Self::Item>413 fn next(&mut self) -> Option<Self::Item> {
414 let res = self
415 .entries
416 .get(self.idx)
417 .filter(|v| !v.is_null())
418 .map(|v| Partition::new(*v, self.block_size))?;
419 self.idx += 1;
420 Some(res)
421 }
422 }
423
/// Contains result of GPT syncing/restoration.
#[derive(Copy, Clone, PartialEq, Debug, Default)]
pub enum GptSyncResult {
    /// Both primary and secondary GPT are valid.
    #[default]
    BothValid,
    /// Primary GPT is invalid and restored.
    ///
    /// Carries the verification error that caused the restoration.
    PrimaryRestored(Error),
    /// Secondary GPT is invalid and restored.
    ///
    /// Carries the verification error that caused the restoration.
    SecondaryRestored(Error),
    /// Neither primary or secondary GPT is valid.
    NoValidGpt {
        /// Primary GPT verify error.
        primary: Error,
        /// Secondary GPT verify error.
        secondary: Error,
    },
}
442
443 impl GptSyncResult {
444 /// Combined into a result
res(&self) -> Result<()>445 pub fn res(&self) -> Result<()> {
446 match self {
447 Self::NoValidGpt { primary: e, .. } => Err(*e),
448 _ => Ok(()),
449 }
450 }
451 }
452
453 impl core::fmt::Display for GptSyncResult {
fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result454 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
455 match self {
456 Self::BothValid => write!(f, "Found valid GPT."),
457 Self::PrimaryRestored(e) => write!(f, "Primary GPT restored due to {e:?}."),
458 Self::SecondaryRestored(e) => write!(f, "Secondary GPT restored due to {e:?}."),
459 Self::NoValidGpt { primary, secondary } => {
460 write!(f, "No valid GPT. primary: {primary:?}, secondary: {secondary:?}.")
461 }
462 }
463 }
464 }
465
/// A packed wrapper of `Option<NonZeroU64>`
///
/// `Option<NonZeroU64>` occupies exactly 8 bytes (the zero value encodes `None`), which keeps
/// the containing load buffer layout well-defined for zerocopy.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
struct BlockSize(Option<NonZeroU64>);
470
/// Represents the structure of a load buffer for loading/verifying/syncing up to N GPT entries.
///
/// Field order defines the raw buffer layout parsed by [LoadBufferRef] and must stay in sync
/// with it.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
pub struct GptLoadBufferN<const N: usize> {
    // GPT doesn't care about block size. But it's easier to have it available for computing offset
    // and size in bytes for partitions. It's also used as a flag for indicating whether a valid
    // GPT is loaded.
    block_size: BlockSize,
    primary_header: GptHeader,
    secondary_header: GptHeader,
    primary_entries: [GptEntry; N],
    secondary_entries: [GptEntry; N],
}
484
impl<const N: usize> Deref for GptLoadBufferN<N> {
    type Target = [u8];

    /// Exposes the buffer as raw bytes so `GptLoadBufferN` can be used wherever
    /// `Deref<Target = [u8]>` is expected (e.g. as the `B` in `Gpt<B>`).
    fn deref(&self) -> &Self::Target {
        self.as_bytes()
    }
}
492
impl<const N: usize> DerefMut for GptLoadBufferN<N> {
    /// Mutable counterpart of `deref`; exposes the buffer as raw mutable bytes.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_bytes_mut()
    }
}
498
/// Contains references corresponding to different GPT load entities parsed from a load buffer.
///
/// The structure is simply for organizing together the individual references of fields in
/// `GptLoadBufferN` parsed from a raw buffer. Note that we can't parse a `Ref<B, GptLoadBufferN>`
/// directly from a buffer because the number of entries (length of [GptEntry]) in this case needs
/// to be computed at run time based on the buffer size.
struct LoadBufferRef<B: ByteSlice> {
    // `None` means no valid GPT is currently loaded; `Some` holds the disk block size.
    block_size: Ref<B, BlockSize>,
    primary_header: Ref<B, GptHeader>,
    secondary_header: Ref<B, GptHeader>,
    primary_entries: Ref<B, [GptEntry]>,
    secondary_entries: Ref<B, [GptEntry]>,
}
512
impl<B: ByteSlice> LoadBufferRef<B> {
    /// Parses `buffer` into per-field references.
    ///
    /// # Panics
    ///
    /// Panics if `buffer` is smaller than `size_of::<GptLoadBufferN<0>>()`; callers are expected
    /// to have validated the size via `max_supported_entries` beforehand.
    fn from(buffer: B) -> Self {
        // Entries capacity is whatever the buffer can hold, capped at the GPT max of 128.
        let n = min(GPT_MAX_NUM_ENTRIES, max_supported_entries(&buffer[..]).unwrap());
        // Parse order must mirror the field order of `GptLoadBufferN`.
        let (block_size, rest) = Ref::new_from_prefix(buffer).unwrap();
        let (primary_header, rest) = Ref::new_from_prefix(rest).unwrap();
        let (secondary_header, rest) = Ref::new_from_prefix(rest).unwrap();
        let (primary_entries, rest) = Ref::new_slice_from_prefix(rest, n).unwrap();
        let (secondary_entries, _) = Ref::new_slice_from_prefix(rest, n).unwrap();
        Self { block_size, primary_header, secondary_header, primary_entries, secondary_entries }
    }

    /// Unpacks into the secondary GPT header/entries
    fn secondary(self) -> (Ref<B, GptHeader>, Ref<B, [GptEntry]>) {
        (self.secondary_header, self.secondary_entries)
    }
}
529
530 /// The minimum buffer size needed for creating a [Gpt] that can load `entries` number of
531 /// partitions.
532 ///
533 /// # Returns
534 ///
535 /// * Returns Ok(size) on success.
536 /// * Returns Err(Error::InvalidInput) if max_entries is greater than 128.
gpt_buffer_size(entries: usize) -> Result<usize>537 pub fn gpt_buffer_size(entries: usize) -> Result<usize> {
538 match entries > GPT_MAX_NUM_ENTRIES {
539 true => Err(Error::InvalidInput),
540 _ => Ok(size_of::<GptLoadBufferN<0>>() + entries * GPT_ENTRY_SIZE * 2),
541 }
542 }
543
544 /// Computes the maximum number of entries that can be loaded if using the given buffer for [Gpt].
max_supported_entries(buf: &[u8]) -> Result<usize>545 fn max_supported_entries(buf: &[u8]) -> Result<usize> {
546 match buf.len() < size_of::<GptLoadBufferN<0>>() {
547 true => Err(Error::BufferTooSmall(Some(size_of::<GptLoadBufferN<0>>()))),
548 _ => Ok((buf.len() - size_of::<GptLoadBufferN<0>>()) / 2 / GPT_ENTRY_SIZE),
549 }
550 }
551
/// [Gpt] manages a buffer for loading, verifying and syncing GPT.
pub struct Gpt<B> {
    // Raw backing buffer laid out as `GptLoadBufferN`; parsed on demand via `LoadBufferRef`.
    buffer: B,
}
556
impl<B: DerefMut<Target = [u8]>> Gpt<B> {
    /// Create an uninitialized Gpt instance from a provided buffer.
    ///
    /// The created [Gpt] can then be used in `Disk::sync_gpt()` for loading, verifying and syncing
    /// GPT on disk.
    ///
    /// # Args:
    ///
    /// * `buffer`: A buffer to use for loading, verifying and syncing primary and secondary GPT.
    ///   The size of the buffer determines the maximum number of partition entries that can be
    ///   loaded. If actual number of partitions, specified by `entries_count` in the GPT header,
    ///   exceeds it, verification and sync will eventually fail with `Error::BufferTooSmall`.
    ///   `gpt_buffer_size(num_entries)` can be used to compute the required size of buffer for
    ///   loading a specific number of entries. Note that most tools and OS fix the `entries_count`
    ///   value to the max 128 regardless of the actual number of partition entries used. Thus
    ///   unless you have full control of GPT generation in your entire system where you can always
    ///   ensure a smaller bound on it, it is recommended to always provide enough buffer for
    ///   loading 128 entries.
    ///
    /// # Returns
    ///
    /// * Returns Ok(Self) on success.
    /// * Returns Err(Error::BufferTooSmall) if buffer is less than the minimum size.
    pub fn new(mut buffer: B) -> Result<Self> {
        // Rejects buffers that can't even hold the fixed-size portion.
        max_supported_entries(&buffer[..])?;
        // A `None` block size marks the instance as "no valid GPT loaded yet".
        LoadBufferRef::from(&mut buffer[..]).block_size.0 = None;
        Ok(Self { buffer })
    }

    /// Returns the maximum allowed entries.
    pub fn max_entries(&self) -> usize {
        // Buffer size was validated in `new()`, so this cannot fail.
        max_supported_entries(&self.buffer[..]).unwrap()
    }

    /// Creates an instance of `Gpt<&mut [u8]>` that borrows the internal GPT buffer.
    pub fn as_borrowed(&mut self) -> Gpt<&mut [u8]> {
        Gpt { buffer: &mut self.buffer[..] }
    }

    /// Returns an iterator to GPT partition entries.
    ///
    /// If the object does not contain a valid GPT, the method returns Error.
    pub fn partition_iter(&self) -> Result<PartitionIterator> {
        let block_size = self.check_valid()?;
        // Primary entries are authoritative after a successful sync.
        let entries = LoadBufferRef::from(&self.buffer[..]).primary_entries.into_slice();
        Ok(PartitionIterator { entries, idx: 0, block_size })
    }

    /// Checks if a read/write range into a GPT partition overflows and returns the range's absolute
    /// offset in number of bytes.
    pub fn check_range(&self, part_name: &str, offset: u64, size: usize) -> Result<u64> {
        self.find_partition(part_name)?.check_range(offset, u64::try_from(size)?)
    }

    /// Return the list of GPT entries.
    ///
    /// If there is not a valid GPT, the method returns Error.
    pub fn entries(&self) -> Result<&[GptEntry]> {
        self.check_valid()?;
        let entries = LoadBufferRef::from(&self.buffer[..]).primary_entries.into_slice();
        // Truncate at the first NULL entry, which terminates the partition list.
        let n = entries.iter().position(|v| v.is_null()).unwrap_or(entries.len());
        Ok(&entries[..n])
    }

    /// Returns the total number of partitions.
    pub fn num_partitions(&self) -> Result<usize> {
        Ok(self.entries()?.len())
    }

    /// Gets the `idx`th partition.
    ///
    /// Returns `Error::BadIndex` if `idx` is out of range.
    pub fn get_partition(&self, idx: usize) -> Result<Partition> {
        let block_size = self.check_valid()?;
        let entry = *self.entries()?.get(idx).ok_or(Error::BadIndex(idx))?;
        Ok(Partition::new(entry, block_size))
    }

    /// Returns the `Partition` for a partition.
    ///
    /// # Args
    ///
    /// * `part`: Name of the partition.
    ///
    /// Returns `Error::NotFound` if no entry's name matches `part`.
    pub fn find_partition(&self, part: &str) -> Result<Partition> {
        let block_size = self.check_valid()?;
        for entry in self.entries()? {
            let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
            if entry.name_to_str(&mut name_conversion_buffer)? != part {
                continue;
            }
            return Ok(Partition::new(*entry, block_size));
        }
        Err(Error::NotFound)
    }

    /// Checks whether the Gpt has been initialized and returns the block size.
    ///
    /// The block size doubles as the "valid GPT loaded" flag: `None` means uninitialized.
    fn check_valid(&self) -> Result<u64> {
        Ok(LoadBufferRef::from(&self.buffer[..]).block_size.0.ok_or(Error::InvalidState)?.get())
    }

    /// Helper function for loading and validating GPT header and entries.
    ///
    /// Reads the requested header (primary or secondary) and its entries array from `disk` into
    /// this object's buffer, then runs `check_header` / `check_entries` on them.
    async fn load_and_validate_gpt(
        &mut self,
        disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
        hdr_type: HeaderType,
    ) -> Result<()> {
        let blk_sz = disk.io().info().block_size;
        let load = LoadBufferRef::from(&mut self.buffer[..]);
        // Primary header lives at block 1; secondary at the last block of the disk.
        let (header_start, mut header, mut entries) = match hdr_type {
            HeaderType::Primary => (blk_sz, load.primary_header, load.primary_entries),
            HeaderType::Secondary => (
                ((SafeNum::from(disk.io().info().num_blocks) - 1) * blk_sz).try_into()?,
                load.secondary_header,
                load.secondary_entries,
            ),
        };

        // Loads the header
        disk.read(header_start, header.as_bytes_mut()).await?;
        // Checks header.
        check_header(disk.io(), &header, matches!(hdr_type, HeaderType::Primary))?;
        // Loads the entries.
        // `entries_count` is bounded by `check_header` above, so the sizes below can't overflow.
        let entries_size = SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE;
        let entries_offset = SafeNum::from(header.entries) * blk_sz;
        // Fails with the buffer size needed for this header's `entries_count` if ours is smaller.
        let out = entries.as_bytes_mut().get_mut(..entries_size.try_into().unwrap()).ok_or(
            Error::BufferTooSmall(Some(
                gpt_buffer_size(header.entries_count.try_into().unwrap()).unwrap(),
            )),
        )?;
        disk.read(entries_offset.try_into().unwrap(), out).await?;
        // Checks entries.
        check_entries(&header, entries.as_bytes())
    }

    /// Loads and syncs GPT from a block device.
    ///
    /// * Returns Ok(sync_result) if disk IO is successful, where `sync_result` contains the GPT
    ///   verification and restoration result,
    /// * Returns Err() if disk IO encounters error.
    pub(crate) async fn load_and_sync(
        &mut self,
        disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
    ) -> Result<GptSyncResult> {
        let blk_sz = disk.io().info().block_size;
        let nonzero_blk_sz = NonZeroU64::new(blk_sz).ok_or(Error::InvalidInput)?;
        let total_blocks: SafeNum = disk.io().info().num_blocks.into();

        let primary_header_blk = 1;
        let primary_header_pos = blk_sz;
        let secondary_header_blk = total_blocks - 1;

        // Entries position for restoring.
        let primary_entries_blk = 2;
        let primary_entries_pos = SafeNum::from(primary_entries_blk) * blk_sz;
        let primary_res = self.load_and_validate_gpt(disk, HeaderType::Primary).await;
        let secondary_res = self.load_and_validate_gpt(disk, HeaderType::Secondary).await;

        let LoadBufferRef {
            mut block_size,
            mut primary_header,
            mut secondary_header,
            mut primary_entries,
            mut secondary_entries,
        } = LoadBufferRef::from(&mut self.buffer[..]);
        // Invalidate while we may be mutating the loaded state; re-set on success below.
        block_size.0 = None;
        let primary_entries = primary_entries.as_bytes_mut();
        let secondary_entries = secondary_entries.as_bytes_mut();
        let sync_res = match (primary_res, secondary_res) {
            (Err(primary), Err(secondary)) => GptSyncResult::NoValidGpt { primary, secondary },
            (Ok(()), Ok(())) if is_consistent(&primary_header, &secondary_header) => {
                GptSyncResult::BothValid
            }
            (Err(e), Ok(())) => {
                // Restores to primary
                // Copy the valid secondary, then patch the location fields that differ between
                // the two copies and recompute the CRC.
                primary_header.as_bytes_mut().clone_from_slice(secondary_header.as_bytes());
                primary_entries.clone_from_slice(&secondary_entries);
                primary_header.current = primary_header_blk;
                primary_header.backup = secondary_header_blk.try_into()?;
                primary_header.entries = primary_entries_blk;
                primary_header.update_crc();

                disk.write(primary_header_pos, primary_header.as_bytes_mut()).await?;
                disk.write(primary_entries_pos.try_into()?, primary_entries).await?;
                GptSyncResult::PrimaryRestored(e)
            }
            (Ok(()), v) => {
                // Restores to secondary
                // Reached both when the secondary failed verification (`v` is Err) and when it
                // is valid but inconsistent with the primary (`v` is Ok).
                let pos = secondary_header_blk * blk_sz;
                // Secondary entries sit immediately before the secondary header.
                let secondary_entries_pos = pos - GPT_MAX_NUM_ENTRIES_SIZE;
                let secondary_entries_blk = secondary_entries_pos / blk_sz;

                secondary_header.as_bytes_mut().clone_from_slice(primary_header.as_bytes());
                secondary_entries.clone_from_slice(primary_entries);
                secondary_header.current = secondary_header_blk.try_into()?;
                secondary_header.backup = primary_header_blk;
                secondary_header.entries = secondary_entries_blk.try_into()?;
                secondary_header.update_crc();

                disk.write(pos.try_into()?, secondary_header.as_bytes_mut()).await?;
                disk.write(secondary_entries_pos.try_into()?, secondary_entries).await?;

                GptSyncResult::SecondaryRestored(match v {
                    Err(e) => e,
                    _ => Error::GptError(GptError::DifferentFromPrimary),
                })
            }
        };

        // Mark the loaded GPT as valid.
        block_size.0 = Some(nonzero_blk_sz);
        Ok(sync_res)
    }
}
767
768 /// Checks whether primary and secondary header
is_consistent(primary: &GptHeader, secondary: &GptHeader) -> bool769 fn is_consistent(primary: &GptHeader, secondary: &GptHeader) -> bool {
770 let mut expected_secondary = *primary;
771 expected_secondary.crc32 = secondary.crc32;
772 expected_secondary.current = secondary.current;
773 expected_secondary.backup = 1;
774 expected_secondary.entries = secondary.entries;
775 &expected_secondary == secondary
776 }
777
/// A [Gpt] that owns a `GptLoadBufferN<N>` and can load up to N partition entries.
///
/// Note: The size of this type increases with N and can be expensive to store on stack. It is
/// typically intended for resource abundant environment such as test.
pub type GptN<const N: usize> = Gpt<GptLoadBufferN<N>>;

/// Creates an instance of GptN.
///
/// The backing buffer is zero-initialized; `Gpt::new` cannot fail here because the owned buffer
/// is by construction large enough for `N` entries.
pub fn new_gpt_n<const N: usize>() -> GptN<N> {
    Gpt::new(GptLoadBufferN::<N>::new_zeroed()).unwrap()
}
788
/// A [Gpt] that owns a `GptLoadBufferN<128>` and can load the maximum 128 partition entries.
///
/// Note: The size of this type is approximately 34K and can be expensive to store on stack. It
/// is typically intended for resource abundant environment such as test.
pub type GptMax = GptN<GPT_MAX_NUM_ENTRIES>;

/// Creates an instance of GptMax.
pub fn new_gpt_max() -> GptMax {
    new_gpt_n::<GPT_MAX_NUM_ENTRIES>()
}
799
/// Updates GPT on a block device.
///
/// Writes the given MBR + primary GPT to disk (after adapting it to this disk's size and
/// re-computing CRCs) and then re-syncs, which also regenerates the secondary copy.
///
/// # Args
///
/// * `disk`: The [Disk] to update.
/// * `mbr_primary`: A buffer containing the MBR block, primary GPT header and entries. The header
///   portion is modified in place: `last`/`backup` are adjusted to this disk and CRCs updated.
/// * `resize`: If set to true, the method updates the last partition to cover the rest of the
///   storage.
/// * `gpt`: The output [Gpt] to update.
pub(crate) async fn update_gpt(
    disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
    mbr_primary: &mut [u8],
    resize: bool,
    gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
) -> Result<()> {
    let blk_sz: usize = disk.io().info().block_size.try_into()?;
    // Splits off the MBR block, then the GPT header block; `remain` holds the entries.
    let (header, remain) = mbr_primary
        .get_mut(blk_sz..)
        .map(|v| v.split_at_mut_checked(blk_sz))
        .flatten()
        .ok_or(Error::BufferTooSmall(Some(blk_sz * 2)))?;
    let header = Ref::<_, GptHeader>::new_from_prefix(&mut header[..]).unwrap().0.into_mut();

    // Adjusts last usable block according to this device in case the GPT was generated for a
    // different disk size. If this results in some partition being out of range, it will be
    // caught during `check_header()`.
    let entries_blk = SafeNum::from(GPT_MAX_NUM_ENTRIES_SIZE) / blk_sz;
    // Reserves only secondary GPT header and entries.
    let num_blks = SafeNum::from(disk.io().info().num_blocks);
    header.last = (num_blks - entries_blk - 2).try_into().unwrap();
    header.backup = (num_blks - 1).try_into().unwrap();
    header.update_crc();

    check_header(disk.io(), &header, true)?;
    // Computes entries offset in bytes relative to `remain`. `header.entries` is an absolute
    // block index; `remain` starts at block 2 (after MBR + header), hence the `- 2`.
    let entries_off: usize = ((SafeNum::from(header.entries) - 2) * blk_sz).try_into().unwrap();
    let entries_size: usize =
        (SafeNum::from(header.entries_count) * header.entries_size).try_into().unwrap();
    let entries = remain
        .get_mut(entries_off..)
        .map(|v| v.get_mut(..entries_size))
        .flatten()
        .ok_or(Error::BufferTooSmall(Some(2 * blk_sz + entries_off + entries_size)))?;
    check_entries(&header, entries)?;

    if resize {
        // Updates the last entry to cover the rest of the storage.
        let gpt_entries =
            Ref::<_, [GptEntry]>::new_slice(&mut entries[..]).unwrap().into_mut_slice();
        gpt_entries.iter_mut().filter(|e| !e.is_null()).last().map(|v| v.last = header.last);
        header.update_entries_crc(entries);
        // Re-verifies everything.
        check_header(disk.io(), &header, true).unwrap();
        check_entries(&header, entries).unwrap();
    }

    // Writes the adapted MBR + primary GPT back and re-syncs so `gpt` reflects the new table.
    disk.write(0, mbr_primary).await?;
    disk.sync_gpt(gpt).await?.res()
}
860
/// Erases GPT if there is one on the device.
///
/// Zero-fills both the primary and secondary header/entries regions on disk. If the device has
/// no valid GPT, this is a no-op. `gpt` is invalidated (its cached block size is cleared) before
/// any disk write is issued.
///
/// # Args
///
/// * `disk`: The [Disk] to erase the GPT from.
/// * `gpt`: A [Gpt] used to locate the on-disk header/entries; invalidated by this call.
pub(crate) async fn erase_gpt(
    disk: &mut Disk<impl BlockIo, impl DerefMut<Target = [u8]>>,
    gpt: &mut Gpt<impl DerefMut<Target = [u8]>>,
) -> Result<()> {
    match disk.sync_gpt(gpt).await?.res() {
        Err(_) => Ok(()), // No valid GPT. Nothing to erase.
        _ => {
            let blk_sz = disk.block_info().block_size;
            let mut load = LoadBufferRef::from(&mut gpt.buffer[..]);
            // Size of the on-disk entries region, derived from the just-synced primary header.
            let entries_size = SafeNum::from(load.primary_header.entries_count) * GPT_ENTRY_SIZE;
            // Reuses the loaded entries buffer as fill scratch; its content is no longer needed.
            let scratch = load.primary_entries.as_bytes_mut();
            // Invalidate GPT first.
            load.block_size.0 = None;
            // Erases primary header/entries.
            let header = load.primary_header.current;
            let entries = load.primary_header.entries;
            disk.fill(header * blk_sz, blk_sz, 0, scratch).await?;
            disk.fill(entries * blk_sz, entries_size.try_into().unwrap(), 0, scratch).await?;
            // Erases secondary header/entries.
            let header = load.secondary_header.current;
            let entries = load.secondary_header.entries;
            disk.fill(header * blk_sz, blk_sz, 0, scratch).await?;
            disk.fill(entries * blk_sz, entries_size.try_into().unwrap(), 0, scratch).await?;
            Ok(())
        }
    }
}
889
890 /// Computes the minimum blocks needed for creating a GPT.
min_required_blocks(block_size: u64) -> Result<u64>891 fn min_required_blocks(block_size: u64) -> Result<u64> {
892 // MBR + primary/secondary GPT header block + primary/secondary entries blocks.
893 Ok(1 + (1 + gpt_entries_blk(block_size)?) * 2)
894 }
895
/// `GptBuilder` provides API for modifying/creating GPT partition table on a disk.
pub struct GptBuilder<D, G> {
    // The disk being modified; dereferences to a `Disk`.
    disk: D,
    // GPT load buffer; its secondary header/entries region doubles as the construction buffer.
    gpt: G,
}
901
902 impl<D: Debug, G: Debug> Debug for GptBuilder<D, G> {
fmt(&self, f: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error>903 fn fmt(&self, f: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
904 write!(f, "GptBuilder {{ disk: {:?}, gpt: {:?} }}", self.disk, self.gpt)
905 }
906 }
907 // Generic parameters:
908 //
// * T: The type that implements BlockIo.
910 // * S: The type for the scratch buffer in `Self::disk`.
911 // * B: The type for the GPT buffer in `Self::gpt`.
912 // * D: The type for `Self::disk` which can dereference to a Disk<T, S>.
913 // * G: The type for `Self::gpt` which can dereference to a Gpt<B>.
914 impl<'a, T, S, B, D, G> GptBuilder<D, G>
915 where
916 T: BlockIo,
917 S: DerefMut<Target = [u8]>,
918 B: DerefMut<Target = [u8]>,
919 D: DerefMut<Target = Disk<T, S>>,
920 G: DerefMut<Target = Gpt<B>>,
921 {
    /// Creates a new instance.
    ///
    /// The method always re-syncs the GPT. If `disk` does not contain a valid GPT, a new GPT is
    /// started from scratch.
    ///
    /// The partition entries will always be sorted when writing back to disk by `Self::persist()`.
    ///
    /// # Returns
    ///
    /// * Returns Ok((Self, true)) if an instance is created and the disk has a valid GPT.
    /// * Returns Ok((Self, false)) if an instance is created but disk does not have a valid GPT.
    /// * Returns Err() otherwise.
    pub fn new(mut disk: D, mut gpt: G) -> Result<(Self, bool)> {
        // The disk must at least fit MBR + both GPT copies.
        if disk.block_info().num_blocks < min_required_blocks(disk.block_info().block_size)? {
            return Err(Error::GptError(GptError::DiskTooSmall));
        }
        let has_valid_gpt = block_on(disk.sync_gpt(&mut gpt))?.res().is_ok();
        // Uses the buffer for secondary GPT header/entries as construction buffer, as it is not
        // used by Gpt once loaded and synced.
        let (mut header, mut entries) = LoadBufferRef::from(&mut gpt.buffer[..]).secondary();
        if !has_valid_gpt {
            // No usable GPT on disk: start from an all-zero header/entries and fill in a fresh
            // secondary header describing an empty table.
            header.as_bytes_mut().fill(0);
            entries.as_bytes_mut().fill(0);
            let entries_blk = gpt_entries_blk(disk.block_info().block_size).unwrap();
            // Initializes a secondary header.
            let num_blks = SafeNum::from(disk.block_info().num_blocks);
            header.magic = GPT_MAGIC;
            header.current = (num_blks - 1).try_into().unwrap();
            header.backup = 1;
            header.size = size_of::<GptHeader>().try_into().unwrap();
            header.first = 1 + 1 + entries_blk; // MBR + GPT header blocks + entries block
            header.last = (num_blks - 1 - entries_blk - 1).try_into().unwrap();
            header.entries = (num_blks - 1 - entries_blk).try_into().unwrap();
            header.entries_count = 0;
            header.entries_size = size_of::<GptEntry>().try_into().unwrap();
        }
        // Normalizes `entries_count` to the actual number of valid entries. Some GPT disks fix
        // `entries_count` to 128.
        header.entries_count =
            entries.iter().position(|v| v.is_null()).unwrap_or(entries.len()).try_into().unwrap();
        // Sorts entries by starting block so gap-based insertion in `insert_before` works; NULL
        // entries sink to the end.
        entries.sort_unstable_by_key(|v| match v.is_null() {
            true => u64::MAX,
            _ => v.first,
        });
        Ok((Self { disk, gpt }, has_valid_gpt))
    }
968
    /// Removes a partition by name.
    ///
    /// # Args
    ///
    /// * `part`: Name of the partition to remove.
    ///
    /// # Returns
    ///
    /// * Returns Ok(true) if found and removed.
    /// * Returns Ok(false) if not found.
    /// * Returns Err() otherwise.
    pub fn remove(&mut self, part: &str) -> Result<bool> {
        let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
        // Only the first `entries_count` entries are valid (normalized in `Self::new()`).
        let entries = &mut entries[..header.entries_count.try_into().unwrap()];
        match entries.iter().position(|v| v.match_name(part).unwrap_or(false)) {
            Some(n) => {
                // Shift the elements behind forward.
                entries[n..].rotate_left(1);
                // Zeroizes the last element.
                entries.last_mut().unwrap().as_bytes_mut().fill(0);
                header.entries_count -= 1;
                Ok(true)
            }
            _ => Ok(false),
        }
    }
991
    /// Inserts a new partition before a partition.
    ///
    /// # Args
    ///
    /// * `idx`: Index of the partition to insert before. If index is out of range of valid
    ///   entries, the partition will be inserted at the last.
    /// * `name`: Name of the partition. Encoded as UTF-16; truncated to [GPT_NAME_LEN_U16]
    ///   characters if longer.
    /// * `part_type`: Type GUID.
    /// * `unique_guid`: Unique GUID.
    /// * `flags`: Partition flag.
    /// * `size`: If Some(_), specifies the size in number of bytes for the partition. The method
    ///   will round it up to multiple of disk block size and check that there is enough space for
    ///   the partition. If None, the method will insert the partition and consumes all the
    ///   available space in between.
    fn insert_before(
        &mut self,
        idx: usize,
        name: &str,
        part_type: [u8; GPT_GUID_LEN],
        unique_guid: [u8; GPT_GUID_LEN],
        flags: u64,
        size: Option<u64>,
    ) -> Result<()> {
        let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
        // Gets position to the first NULL entry.
        let n = entries.iter().position(|v| v.is_null()).ok_or(Error::OutOfResources)?;
        // Keeps the valid entries plus one NULL slot to rotate the new entry into.
        let entries = &mut entries[..n + 1];
        // Caps `idx` to no more than the first NULL entry.
        let idx = min(n, idx);
        // Computes the ending block index (non-inclusive) of the previous partition entry.
        // Entries are guaranteed sorted in `Self::new()`.
        let prev_end = match idx {
            0 => header.first,
            _ => entries[idx - 1].last + 1,
        };
        // Computes the starting block index (inclusive) of the next partition entry.
        let next_start = match idx == n {
            true => header.last + 1,
            _ => entries[idx].first,
        };
        // Computes the size in number of blocks.
        let blk_sz = self.disk.block_info().block_size;
        let blocks: u64 = match size {
            Some(v) => (SafeNum::from(v).round_up(blk_sz) / blk_sz).try_into()?,
            _ => next_start - prev_end, // If not given, uses up all the gap space
        };
        // Checks if there is enough space in the gap between the neighboring partitions.
        if next_start - prev_end < blocks {
            return Err(Error::OutOfResources);
        }
        // Inserts the new entry by rotating the NULL slot into position `idx`.
        entries[idx..].rotate_right(1);
        let entry = &mut entries[idx];
        assert!(entry.is_null());
        entry.part_type = part_type;
        entry.guid = unique_guid;
        entry.flags = flags;
        entry.first = prev_end;
        entry.last = prev_end + blocks - 1;
        // Copies the UTF-16 name, silently truncating at GPT_NAME_LEN_U16.
        for (idx, ele) in name.encode_utf16().enumerate() {
            match idx < GPT_NAME_LEN_U16 {
                true => entry.name[idx] = ele,
                _ => break,
            }
        }
        header.entries_count += 1;
        Ok(())
    }
1060
1061 /// Adds a partition.
1062 ///
1063 /// # Args
1064 ///
1065 /// * `name`: Name of the partition.
1066 /// * `part_type`: Type GUID.
1067 /// * `unique_guid`: Unique GUID.
1068 /// * `flags`: Partition flag.
1069 /// * `size`: If Some(_), specifies the size in number of bytes for the partition. The method
1070 /// will round it up to multiple of disk block size and search for the first large enough
1071 /// space in the unused spae for putting the partition. If None, the method will add the
1072 /// partition at the last and have it consume all remaining usable disk space.
add( &mut self, name: &str, part_type: [u8; GPT_GUID_LEN], unique_guid: [u8; GPT_GUID_LEN], flags: u64, size: Option<u64>, ) -> Result<()>1073 pub fn add(
1074 &mut self,
1075 name: &str,
1076 part_type: [u8; GPT_GUID_LEN],
1077 unique_guid: [u8; GPT_GUID_LEN],
1078 flags: u64,
1079 size: Option<u64>,
1080 ) -> Result<()> {
1081 let (header, _) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
1082 let entry_count = usize::try_from(header.entries_count).unwrap();
1083 let search_start = size.is_some().then_some(0).unwrap_or(entry_count);
1084 for i in search_start..entry_count + 1 {
1085 if self.insert_before(i, name, part_type, unique_guid, flags, size).is_ok() {
1086 return Ok(());
1087 }
1088 }
1089 Err(Error::OutOfResources)
1090 }
1091
    /// Persists the constructed GPT table to the disk and syncs. The builder is consumed.
    pub async fn persist(mut self) -> Result<()> {
        let (mut header, mut entries) = LoadBufferRef::from(&mut self.gpt.buffer[..]).secondary();
        header.update_entries_crc(entries.as_bytes());
        // Check validity. Should not fail if implementation is correct.
        check_header(self.disk.io(), &header, false).unwrap();
        check_entries(&header, entries.as_bytes()).unwrap();
        let blk_sz = self.disk.block_info().block_size;
        // Writes to secondary header/entries.
        self.disk.write(header.current * blk_sz, header.as_bytes_mut()).await?;
        self.disk.write(header.entries * blk_sz, entries.as_bytes_mut()).await?;
        // Clears primary header magic — presumably so the following sync sees an invalid primary
        // and restores it from the just-written secondary; confirm against the sync/restore path.
        self.disk.write(blk_sz, &mut 0u64.to_be_bytes()).await?;
        // Re-syncs GPT so `self.gpt` reflects the persisted table.
        self.disk.sync_gpt(&mut self.gpt).await?.res()
    }
1108 }
1109
1110 /// Helper for calculcating the Crc32.
crc32(data: &[u8]) -> u321111 fn crc32(data: &[u8]) -> u32 {
1112 let mut hasher = Hasher::new();
1113 hasher.update(data);
1114 hasher.finalize()
1115 }
1116
1117 #[cfg(test)]
1118 pub(crate) mod test {
1119 use super::*;
1120 use crate::test::TestDisk;
1121 use gbl_async::block_on;
1122
1123 /// A helper for creating a [TestDisk] from given data.
test_disk(data: impl AsRef<[u8]>) -> TestDisk1124 fn test_disk(data: impl AsRef<[u8]>) -> TestDisk {
1125 // All tests cases use pre-generated GPT disk of 512 block size.
1126 TestDisk::new_ram_alloc(512, 512, data.as_ref().to_vec()).unwrap()
1127 }
1128
1129 /// A helper for creating a [TestDisk] from given data and a [Gpt] for 128 entries.
test_disk_and_gpt(data: impl AsRef<[u8]>) -> (TestDisk, GptMax)1130 fn test_disk_and_gpt(data: impl AsRef<[u8]>) -> (TestDisk, GptMax) {
1131 (test_disk(data), new_gpt_max())
1132 }
1133
1134 #[test]
test_load_and_sync()1135 fn test_load_and_sync() {
1136 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1137 block_on(dev.sync_gpt(&mut gpt)).unwrap();
1138
1139 assert_eq!(gpt.partition_iter().unwrap().count(), 2);
1140 gpt.find_partition("boot_a").unwrap();
1141 gpt.find_partition("boot_b").unwrap();
1142 assert!(gpt.find_partition("boot_c").is_err());
1143
1144 // Creating a new [Gpt] using the same buffer should reset the valid state.
1145 let gpt = Gpt::new(gpt.buffer).unwrap();
1146 assert!(gpt.partition_iter().is_err());
1147 assert!(gpt.find_partition("boot_a").is_err());
1148 assert!(gpt.find_partition("boot_b").is_err());
1149 }
1150
1151 #[test]
test_load_with_unaligned_buffer()1152 fn test_load_with_unaligned_buffer() {
1153 #[repr(align(8))]
1154 struct AlignedBuffer([u8; 34 * 1024]);
1155 let mut buffer = AlignedBuffer([0u8; 34 * 1024]);
1156 let buffer = &mut buffer.0[1..];
1157 assert_ne!(buffer.as_ptr() as usize % 2, 0);
1158 let mut disk = test_disk(include_bytes!("../test/gpt_test_1.bin"));
1159 let mut gpt = Gpt::new(buffer).unwrap();
1160 block_on(disk.sync_gpt(&mut gpt)).unwrap();
1161 }
1162
1163 #[test]
test_gpt_buffer_too_small()1164 fn test_gpt_buffer_too_small() {
1165 assert!(Gpt::new(vec![0u8; size_of::<GptLoadBufferN<0>>() - 1]).is_err());
1166 }
1167
1168 #[test]
test_gpt_buffer_not_enough_for_all_entries()1169 fn test_gpt_buffer_not_enough_for_all_entries() {
1170 let mut dev = test_disk(include_bytes!("../test/gpt_test_1.bin"));
1171 let mut gpt = new_gpt_n::<127>();
1172 assert_eq!(gpt.max_entries(), 127);
1173 // Actual entries_count is 128 in the GPT.
1174 assert!(block_on(dev.sync_gpt(&mut gpt)).unwrap().res().is_err());
1175 }
1176
1177 #[test]
test_good_gpt_no_repair_write()1178 fn test_good_gpt_no_repair_write() {
1179 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1180 assert_eq!(block_on(dev.sync_gpt(&mut gpt)).unwrap(), GptSyncResult::BothValid);
1181 }
1182
1183 /// A helper for testing restoration of invalid primary/secondary header modified by caller.
test_gpt_sync_restore<'a>( modify_primary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>), modify_secondary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>), expect_primary_err: Error, expect_secondary_err: Error, )1184 fn test_gpt_sync_restore<'a>(
1185 modify_primary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>),
1186 modify_secondary: impl FnOnce(&mut GptHeader, Ref<&mut [u8], [GptEntry]>),
1187 expect_primary_err: Error,
1188 expect_secondary_err: Error,
1189 ) {
1190 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1191
1192 // Restores from secondary to primary.
1193 let mut disk = disk_orig.to_vec();
1194 let (header, entries) = (&mut disk[512..]).split_at_mut(512);
1195 let mut header = GptHeader::from_bytes_mut(header);
1196 modify_primary(&mut header, Ref::<_, [GptEntry]>::new_slice(entries).unwrap());
1197 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1198 assert_ne!(dev.io().storage(), disk_orig);
1199 let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
1200 assert_eq!(sync_res, GptSyncResult::PrimaryRestored(expect_primary_err));
1201 assert_eq!(dev.io().storage(), disk_orig);
1202
1203 // Restores from primary to secondary.
1204 let mut disk = disk_orig.to_vec();
1205 let (entries, header) = (&mut disk[512..]).split_last_chunk_mut::<512>().unwrap();
1206 let (_, entries) = entries.split_last_chunk_mut::<{ 512 * 32 }>().unwrap();
1207 let mut header = GptHeader::from_bytes_mut(&mut header[..]);
1208 modify_secondary(&mut header, Ref::<_, [GptEntry]>::new_slice(&mut entries[..]).unwrap());
1209 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1210 assert_ne!(dev.io().storage(), disk_orig);
1211 let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
1212 assert_eq!(sync_res, GptSyncResult::SecondaryRestored(expect_secondary_err));
1213 assert_eq!(dev.io().storage(), disk_orig);
1214 }
1215
1216 #[test]
test_sync_gpt_incorrect_magic()1217 fn test_sync_gpt_incorrect_magic() {
1218 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1219 hdr.magic = 0x123456;
1220 hdr.update_crc();
1221 }
1222 let err = Error::GptError(GptError::IncorrectMagic(0x123456));
1223 test_gpt_sync_restore(modify, modify, err, err);
1224 }
1225
1226 #[test]
test_sync_gpt_incorrect_crc()1227 fn test_sync_gpt_incorrect_crc() {
1228 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1229 hdr.crc32 = !hdr.crc32;
1230 }
1231 let err = Error::GptError(GptError::IncorrectHeaderCrc);
1232 test_gpt_sync_restore(modify, modify, err, err);
1233 }
1234
1235 #[test]
test_sync_gpt_unexpected_header_size()1236 fn test_sync_gpt_unexpected_header_size() {
1237 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1238 hdr.size += 1;
1239 hdr.update_crc();
1240 }
1241 let err = Error::GptError(GptError::UnexpectedHeaderSize { actual: 93, expect: 92 });
1242 test_gpt_sync_restore(modify, modify, err, err);
1243 }
1244
1245 #[test]
test_sync_gpt_unexpected_entry_size()1246 fn test_sync_gpt_unexpected_entry_size() {
1247 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1248 hdr.entries_size += 1;
1249 hdr.update_crc();
1250 }
1251 let err = Error::GptError(GptError::UnexpectedEntrySize { actual: 129, expect: 128 });
1252 test_gpt_sync_restore(modify, modify, err, err);
1253 }
1254
1255 #[test]
test_sync_gpt_first_usable_gt_last()1256 fn test_sync_gpt_first_usable_gt_last() {
1257 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1258 hdr.first = hdr.last;
1259 hdr.last = hdr.first - 2;
1260 hdr.update_crc();
1261 }
1262 let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
1263 first: 94,
1264 last: 92,
1265 range: (34, 94),
1266 });
1267 test_gpt_sync_restore(modify, modify, err, err);
1268 }
1269
1270 #[test]
test_sync_gpt_first_usable_out_of_range()1271 fn test_sync_gpt_first_usable_out_of_range() {
1272 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1273 hdr.first = 33;
1274 hdr.update_crc();
1275 }
1276 let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
1277 first: 33,
1278 last: 94,
1279 range: (34, 94),
1280 });
1281 test_gpt_sync_restore(modify, modify, err, err);
1282 }
1283
1284 #[test]
test_sync_gpt_last_usable_out_of_range()1285 fn test_sync_gpt_last_usable_out_of_range() {
1286 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1287 hdr.last += 1;
1288 hdr.update_crc();
1289 }
1290 let err = Error::GptError(GptError::InvalidFirstLastUsableBlock {
1291 first: 34,
1292 last: 95,
1293 range: (34, 94),
1294 });
1295 test_gpt_sync_restore(modify, modify, err, err);
1296 }
1297
1298 #[test]
test_sync_gpt_primary_entries_out_of_range()1299 fn test_sync_gpt_primary_entries_out_of_range() {
1300 test_gpt_sync_restore(
1301 |hdr, _| {
1302 hdr.entries = 1;
1303 hdr.update_crc();
1304 },
1305 |hdr, _| {
1306 hdr.entries = hdr.last;
1307 hdr.update_crc();
1308 },
1309 Error::GptError(GptError::InvalidPrimaryEntriesStart {
1310 value: 1,
1311 expect_range: (2, 2),
1312 }),
1313 Error::GptError(GptError::InvalidSecondaryEntriesStart {
1314 value: 94,
1315 expect_range: (95, 95),
1316 }),
1317 );
1318 }
1319
1320 #[test]
test_sync_gpt_incorrect_entry_crc()1321 fn test_sync_gpt_incorrect_entry_crc() {
1322 fn modify(hdr: &mut GptHeader, _: Ref<&mut [u8], [GptEntry]>) {
1323 hdr.entries_crc = !hdr.entries_crc;
1324 hdr.update_crc();
1325 }
1326 let err = Error::GptError(GptError::IncorrectEntriesCrc);
1327 test_gpt_sync_restore(modify, modify, err, err);
1328 }
1329
1330 #[test]
test_sync_gpt_partition_range_overflow()1331 fn test_sync_gpt_partition_range_overflow() {
1332 fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
1333 entries[1].last = hdr.last + 1;
1334 hdr.update_entries_crc(entries.as_bytes());
1335 }
1336 let err = Error::GptError(GptError::InvalidPartitionRange {
1337 idx: 2,
1338 part_range: (50, 95),
1339 usable_range: (34, 94),
1340 });
1341 test_gpt_sync_restore(modify, modify, err, err);
1342 }
1343
1344 #[test]
test_sync_gpt_invalid_partition_range()1345 fn test_sync_gpt_invalid_partition_range() {
1346 fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
1347 entries[1].first = entries[1].last;
1348 entries[1].last = entries[1].first - 2;
1349 hdr.update_entries_crc(entries.as_bytes());
1350 }
1351 let err = Error::GptError(GptError::InvalidPartitionRange {
1352 idx: 2,
1353 part_range: (73, 71),
1354 usable_range: (34, 94),
1355 });
1356 test_gpt_sync_restore(modify, modify, err, err);
1357 }
1358
1359 #[test]
test_sync_gpt_partition_overlap()1360 fn test_sync_gpt_partition_overlap() {
1361 fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
1362 entries[0].last = entries[1].first;
1363 entries.swap(0, 1);
1364 hdr.update_entries_crc(entries.as_bytes());
1365 }
1366 let err = Error::GptError(GptError::PartitionRangeOverlap {
1367 prev: (2, 34, 50),
1368 next: (1, 50, 73),
1369 });
1370 test_gpt_sync_restore(modify, modify, err, err);
1371 }
1372
1373 #[test]
test_sync_gpt_zero_partition_type_guid()1374 fn test_sync_gpt_zero_partition_type_guid() {
1375 fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
1376 entries[1].part_type = [0u8; GPT_GUID_LEN];
1377 hdr.update_entries_crc(entries.as_bytes());
1378 }
1379 let err = Error::GptError(GptError::ZeroPartitionTypeGUID { idx: 2 });
1380 test_gpt_sync_restore(modify, modify, err, err);
1381 }
1382
1383 #[test]
test_sync_gpt_zero_partition_unique_guid()1384 fn test_sync_gpt_zero_partition_unique_guid() {
1385 fn modify(hdr: &mut GptHeader, mut entries: Ref<&mut [u8], [GptEntry]>) {
1386 entries[1].guid = [0u8; GPT_GUID_LEN];
1387 hdr.update_entries_crc(entries.as_bytes());
1388 }
1389 let err = Error::GptError(GptError::ZeroPartitionUniqueGUID { idx: 2 });
1390 test_gpt_sync_restore(modify, modify, err, err);
1391 }
1392
1393 #[test]
test_load_gpt_disk_primary_override_secondary()1394 fn test_load_gpt_disk_primary_override_secondary() {
1395 let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
1396 // Modifies secondary header.
1397 let secondary_hdr = GptHeader::from_bytes_mut(disk.last_chunk_mut::<512>().unwrap());
1398 secondary_hdr.revision = !secondary_hdr.revision;
1399 secondary_hdr.update_crc();
1400 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1401 assert_eq!(
1402 block_on(dev.sync_gpt(&mut gpt)).unwrap(),
1403 GptSyncResult::SecondaryRestored(Error::GptError(GptError::DifferentFromPrimary)),
1404 );
1405 }
1406
1407 #[test]
test_load_gpt_disk_too_small()1408 fn test_load_gpt_disk_too_small() {
1409 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1410 let mut disk = disk_orig.to_vec();
1411 // Resizes so that it's not enough to hold a full 128 maximum entries.
1412 // MBR + (header + entries) * 2 - 1
1413 disk.resize((1 + (32 + 1) * 2 - 1) * 512, 0);
1414 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1415 let sync_res = block_on(dev.sync_gpt(&mut gpt)).unwrap();
1416 let err = Error::GptError(GptError::DiskTooSmall);
1417 assert_eq!(sync_res, GptSyncResult::NoValidGpt { primary: err, secondary: err });
1418 }
1419
1420 #[test]
test_uninitialized_gpt()1421 fn test_uninitialized_gpt() {
1422 let disk = include_bytes!("../test/gpt_test_1.bin");
1423 // Load a good GPT first.
1424 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1425 assert_eq!(block_on(dev.sync_gpt(&mut gpt)).unwrap(), GptSyncResult::BothValid);
1426 gpt.find_partition("boot_a").unwrap();
1427 // Corrupt GPT.
1428 block_on(dev.write(0, &mut vec![0u8; disk.len()])).unwrap();
1429 assert!(block_on(dev.sync_gpt(&mut gpt)).unwrap().res().is_err());
1430 assert!(gpt.find_partition("").is_err());
1431 }
1432
1433 #[test]
test_update_gpt()1434 fn test_update_gpt() {
1435 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1436 let mut disk = disk_orig.to_vec();
1437 // Erases all GPT headers.
1438 disk[512..][..512].fill(0);
1439 disk.last_chunk_mut::<512>().unwrap().fill(0);
1440
1441 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1442
1443 assert_ne!(dev.io().storage(), disk_orig);
1444 let mut mbr_primary = disk_orig[..34 * 512].to_vec();
1445 block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)).unwrap();
1446 assert_eq!(dev.io().storage(), disk_orig);
1447 }
1448
1449 #[test]
test_update_gpt_has_existing_valid_secondary()1450 fn test_update_gpt_has_existing_valid_secondary() {
1451 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1452 let mut disk = disk_orig.to_vec();
1453 // Erases all GPT headers.
1454 disk[512..][..512].fill(0);
1455 // Leaves a valid but different secondary GPT.
1456 let secondary_hdr = GptHeader::from_bytes_mut(disk.last_chunk_mut::<512>().unwrap());
1457 secondary_hdr.revision = !secondary_hdr.revision;
1458 secondary_hdr.update_crc();
1459
1460 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1461
1462 assert_ne!(dev.io().storage(), disk_orig);
1463 let mut mbr_primary = disk_orig[..34 * 512].to_vec();
1464 block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)).unwrap();
1465 assert_eq!(dev.io().storage(), disk_orig);
1466 }
1467
1468 #[test]
test_update_gpt_last_usable_adjusted()1469 fn test_update_gpt_last_usable_adjusted() {
1470 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1471 let mut disk = disk_orig.to_vec();
1472 // Erases all GPT headers.
1473 disk[512..][..512].fill(0);
1474 disk.last_chunk_mut::<512>().unwrap().fill(0);
1475 // Doubles the disk size.
1476 disk.resize(disk_orig.len() * 2, 0);
1477
1478 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1479
1480 assert_ne!(dev.io().storage, disk_orig);
1481 let mut mbr_primary = disk_orig[..34 * 512].to_vec();
1482 block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).unwrap();
1483 let expected_last = (disk.len() - GPT_MAX_NUM_ENTRIES_SIZE - 512) / 512 - 1;
1484
1485 let (primary, secondary) = dev.io().storage().split_last_chunk_mut::<512>().unwrap();
1486 let primary_hdr = GptHeader::from_bytes_mut(&mut primary[512..]);
1487 let secondary_hdr = GptHeader::from_bytes_mut(secondary);
1488 // Header's last usable block is updated.
1489 assert_eq!({ primary_hdr.last }, expected_last.try_into().unwrap());
1490 assert_eq!({ primary_hdr.backup }, (disk.len() / 512 - 1).try_into().unwrap());
1491 assert_eq!({ secondary_hdr.last }, expected_last.try_into().unwrap());
1492 }
1493
1494 #[test]
test_update_gpt_resize()1495 fn test_update_gpt_resize() {
1496 let disk_orig = include_bytes!("../test/gpt_test_1.bin");
1497 let mut disk = disk_orig.to_vec();
1498 // Erases all GPT headers.
1499 disk[512..][..512].fill(0);
1500 disk.last_chunk_mut::<512>().unwrap().fill(0);
1501 // Doubles the disk size.
1502 disk.resize(disk_orig.len() * 2, 0);
1503
1504 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1505
1506 assert_ne!(dev.io().storage, disk_orig);
1507 let mut mbr_primary = disk_orig[..34 * 512].to_vec();
1508 block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).unwrap();
1509 // Last entry is extended.
1510 let expected_last = (disk.len() - GPT_MAX_NUM_ENTRIES_SIZE - 512) / 512 - 1;
1511 assert_eq!({ gpt.entries().unwrap()[1].last }, expected_last.try_into().unwrap());
1512 }
1513
1514 #[test]
test_update_gpt_new_partition_out_of_range()1515 fn test_update_gpt_new_partition_out_of_range() {
1516 // `gpt_test_1.bin` has a 8k "boot_a" and a 12k "boot_b". Thus partitions space is 40
1517 // blocks (512 bytes block size) and in total the GPT disk needs (40 + 1 + (33) * 2) = 107
1518 // blocks.
1519 let (mut dev, mut gpt) = test_disk_and_gpt(&vec![0u8; 106 * 512]);
1520 let mut mbr_primary = include_bytes!("../test/gpt_test_1.bin")[..34 * 512].to_vec();
1521 assert!(block_on(dev.update_gpt(&mut mbr_primary, true, &mut gpt)).is_err());
1522 }
1523
1524 #[test]
test_update_gpt_buffer_truncated()1525 fn test_update_gpt_buffer_truncated() {
1526 let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
1527 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1528
1529 // Less than 1 MBR block.
1530 assert_eq!(
1531 block_on(dev.update_gpt(&mut disk[..511], false, &mut gpt)),
1532 Err(Error::BufferTooSmall(Some(1024)))
1533 );
1534
1535 // Less than MBR + GPT header.
1536 assert_eq!(
1537 block_on(dev.update_gpt(&mut disk[..1023], false, &mut gpt)),
1538 Err(Error::BufferTooSmall(Some(1024)))
1539 );
1540
1541 // Less than MBR + GPT header + entries.
1542 assert_eq!(
1543 block_on(dev.update_gpt(&mut disk[..34 * 512 - 1], false, &mut gpt)),
1544 Err(Error::BufferTooSmall(Some(34 * 512)))
1545 );
1546 }
1547
1548 #[test]
test_update_gpt_check_header_fail()1549 fn test_update_gpt_check_header_fail() {
1550 let disk = include_bytes!("../test/gpt_test_1.bin");
1551 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1552 let mut mbr_primary = disk[..34 * 512].to_vec();
1553 // Corrupts the first byte of the GPT header.
1554 mbr_primary[512] = !mbr_primary[512];
1555 assert_eq!(
1556 block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)),
1557 Err(Error::GptError(GptError::IncorrectMagic(0x54524150204946BA)))
1558 );
1559 }
1560
1561 #[test]
test_update_gpt_check_entries_fail()1562 fn test_update_gpt_check_entries_fail() {
1563 let disk = include_bytes!("../test/gpt_test_1.bin");
1564 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1565 let mut mbr_primary = disk[..34 * 512].to_vec();
1566 // Corrupts the first byte of the entries.
1567 mbr_primary[1024] = !mbr_primary[1024];
1568 assert_eq!(
1569 block_on(dev.update_gpt(&mut mbr_primary, false, &mut gpt)),
1570 Err(Error::GptError(GptError::IncorrectEntriesCrc))
1571 );
1572 }
1573
1574 #[test]
test_erase_gpt_no_gpt()1575 fn test_erase_gpt_no_gpt() {
1576 let (mut dev, mut gpt) = test_disk_and_gpt(&[0u8; 1024 * 1024]);
1577 block_on(dev.erase_gpt(&mut gpt)).unwrap();
1578 }
1579
1580 #[test]
test_erase_gpt()1581 fn test_erase_gpt() {
1582 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1583 block_on(dev.erase_gpt(&mut gpt)).unwrap();
1584 const GPT_SECTOR: usize = 33 * 512;
1585 assert_eq!(dev.io().storage[512..][..GPT_SECTOR], vec![0u8; GPT_SECTOR]);
1586 assert_eq!(*dev.io().storage.last_chunk::<GPT_SECTOR>().unwrap(), *vec![0u8; GPT_SECTOR]);
1587 assert!(matches!(
1588 block_on(dev.sync_gpt(&mut gpt)).unwrap(),
1589 GptSyncResult::NoValidGpt { .. }
1590 ));
1591 }
1592
1593 #[test]
test_zero_partition_size()1594 fn test_zero_partition_size() {
1595 let disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
1596 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1597 let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1598 assert_eq!(builder.remove("boot_a"), Ok(true));
1599 assert_eq!(builder.remove("boot_b"), Ok(true));
1600 builder.add("boot_b", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(0)).unwrap();
1601 block_on(builder.persist()).unwrap();
1602 assert_eq!(gpt.partition_iter().unwrap().next().unwrap().size().unwrap(), 0);
1603 }
1604
1605 #[test]
test_sync_gpt_non_sorted_entries()1606 fn test_sync_gpt_non_sorted_entries() {
1607 let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
1608 let (header, entries) = disk[512..].split_at_mut(512);
1609 let header = GptHeader::from_bytes_mut(header);
1610 let mut entries = Ref::<_, [GptEntry]>::new_slice(entries).unwrap();
1611 // Makes partition non-sorted.
1612 entries.swap(0, 1);
1613 header.update_entries_crc(entries.as_bytes());
1614 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1615 block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
1616 }
1617
1618 #[test]
test_gpt_builder_initialize_gpt_if_no_valid_gpt()1619 fn test_gpt_builder_initialize_gpt_if_no_valid_gpt() {
1620 let (mut dev, mut gpt) = test_disk_and_gpt(vec![0u8; 1024 * 1024]);
1621 let (builder, valid) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1622 assert!(!valid);
1623 block_on(builder.persist()).unwrap();
1624 // A new GPT is created.
1625 block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
1626 assert!(gpt.partition_iter().unwrap().next().is_none());
1627 }
1628
1629 #[test]
test_gpt_builder_remove_partition()1630 fn test_gpt_builder_remove_partition() {
1631 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1632 let (mut builder, valid) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1633 assert!(valid);
1634 assert_eq!(builder.remove("boot_b"), Ok(true));
1635 assert_eq!(builder.remove("non-existent"), Ok(false));
1636 block_on(builder.persist()).unwrap();
1637 block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
1638 let part_iter = gpt.partition_iter().unwrap();
1639 assert_eq!(
1640 part_iter.map(|v| v.name().unwrap().into()).collect::<Vec<String>>(),
1641 ["boot_a"]
1642 );
1643 }
1644
1645 #[test]
test_gpt_builder_add_partition_find_first()1646 fn test_gpt_builder_add_partition_find_first() {
1647 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1648 let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1649 assert!(builder.remove("boot_a").unwrap());
1650 // Adds at the beginning.
1651 builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
1652 // Adds following "new_0"
1653 builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1)).unwrap();
1654 block_on(builder.persist()).unwrap();
1655 block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
1656 assert_eq!(gpt.find_partition("new_0").unwrap().absolute_range().unwrap(), (17408, 18432));
1657 assert_eq!(gpt.find_partition("new_1").unwrap().absolute_range().unwrap(), (18432, 18944));
1658 assert_eq!(gpt.find_partition("boot_b").unwrap().absolute_range().unwrap(), (25600, 37888));
1659 }
1660
1661 #[test]
test_gpt_builder_non_sorted_add_partition()1662 fn test_gpt_builder_non_sorted_add_partition() {
1663 let mut disk = include_bytes!("../test/gpt_test_1.bin").to_vec();
1664 let (mut dev, mut gpt) = test_disk_and_gpt(&disk);
1665 let (header, entries) = disk[512..].split_at_mut(512);
1666 let header = GptHeader::from_bytes_mut(header);
1667 let mut entries = Ref::<_, [GptEntry]>::new_slice(entries).unwrap();
1668 // Makes partition non-sorted.
1669 entries.swap(0, 1);
1670 header.update_entries_crc(entries.as_bytes());
1671
1672 let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1673 // Adds following boot_b.
1674 builder.add("new", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
1675 block_on(builder.persist()).unwrap();
1676 assert_eq!(gpt.find_partition("boot_a").unwrap().absolute_range().unwrap(), (17408, 25600));
1677 assert_eq!(gpt.find_partition("boot_b").unwrap().absolute_range().unwrap(), (25600, 37888));
1678 assert_eq!(gpt.find_partition("new").unwrap().absolute_range().unwrap(), (37888, 38912));
1679 }
1680
1681 #[test]
test_gpt_builder_add_partition_append()1682 fn test_gpt_builder_add_partition_append() {
1683 let (mut dev, mut gpt) = test_disk_and_gpt(include_bytes!("../test/gpt_test_1.bin"));
1684 let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1685 assert!(builder.remove("boot_b").unwrap());
1686 // Adds following "boot_a".
1687 builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
1688 // Consumes the rest of the space.
1689 builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, None).unwrap();
1690 block_on(builder.persist()).unwrap();
1691 block_on(dev.sync_gpt(&mut gpt)).unwrap().res().unwrap();
1692 assert_eq!(gpt.find_partition("boot_a").unwrap().absolute_range().unwrap(), (17408, 25600));
1693 assert_eq!(gpt.find_partition("new_0").unwrap().absolute_range().unwrap(), (25600, 26624));
1694 assert_eq!(gpt.find_partition("new_1").unwrap().absolute_range().unwrap(), (26624, 48640));
1695 }
1696
1697 #[test]
test_gpt_builder_not_enough_resource()1698 fn test_gpt_builder_not_enough_resource() {
1699 // Create a Gpt that can only load 1 entry.
1700 let mut gpt = new_gpt_n::<1>();
1701 let mut dev = test_disk(vec![0u8; 64 * 1024]);
1702 let (mut builder, _) = GptBuilder::new(&mut dev, &mut gpt).unwrap();
1703 builder.add("new_0", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, Some(1024)).unwrap();
1704 assert!(builder.add("new_1", [1u8; GPT_GUID_LEN], [1u8; GPT_GUID_LEN], 0, None).is_err());
1705 }
1706 }
1707