1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cmp::max;
6 use std::cmp::min;
7 use std::collections::HashSet;
8 use std::convert::TryInto;
9 use std::fs::File;
10 use std::fs::OpenOptions;
11 use std::io;
12 use std::io::ErrorKind;
13 use std::io::Read;
14 use std::io::Seek;
15 use std::io::SeekFrom;
16 use std::io::Write;
17 use std::ops::Range;
18 use std::path::Path;
19 use std::path::PathBuf;
20 use std::sync::atomic::AtomicBool;
21 use std::sync::atomic::Ordering;
22 use std::sync::Arc;
23
24 use async_trait::async_trait;
25 use base::AsRawDescriptors;
26 use base::FileAllocate;
27 use base::FileReadWriteAtVolatile;
28 use base::FileSetLen;
29 use base::RawDescriptor;
30 use base::VolatileSlice;
31 use crc32fast::Hasher;
32 use cros_async::BackingMemory;
33 use cros_async::Executor;
34 use cros_async::MemRegionIter;
35 use protobuf::Message;
36 use protos::cdisk_spec;
37 use protos::cdisk_spec::ComponentDisk;
38 use protos::cdisk_spec::CompositeDisk;
39 use protos::cdisk_spec::ReadWriteCapability;
40 use remain::sorted;
41 use thiserror::Error;
42 use uuid::Uuid;
43
44 use crate::gpt;
45 use crate::gpt::write_gpt_header;
46 use crate::gpt::write_protective_mbr;
47 use crate::gpt::GptPartitionEntry;
48 use crate::gpt::GPT_BEGINNING_SIZE;
49 use crate::gpt::GPT_END_SIZE;
50 use crate::gpt::GPT_HEADER_SIZE;
51 use crate::gpt::GPT_NUM_PARTITIONS;
52 use crate::gpt::GPT_PARTITION_ENTRY_SIZE;
53 use crate::gpt::SECTOR_SIZE;
54 use crate::open_disk_file;
55 use crate::AsyncDisk;
56 use crate::DiskFile;
57 use crate::DiskFileParams;
58 use crate::DiskGetLen;
59 use crate::ImageType;
60 use crate::ToAsyncDisk;
61
62 /// The amount of padding needed between the last partition entry and the first partition, to align
63 /// the partition appropriately. The two sectors are for the MBR and the GPT header.
64 const PARTITION_ALIGNMENT_SIZE: usize = GPT_BEGINNING_SIZE as usize
65 - 2 * SECTOR_SIZE as usize
66 - GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize;
67 const HEADER_PADDING_LENGTH: usize = SECTOR_SIZE as usize - GPT_HEADER_SIZE as usize;
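// As a rough sketch, with the conventional GPT constants from gpt.rs (SECTOR_SIZE = 512,
// GPT_HEADER_SIZE = 92, GPT_NUM_PARTITIONS = 128 entries of GPT_PARTITION_ENTRY_SIZE = 128
// bytes, GPT_BEGINNING_SIZE = 0x5000), these work out to 20480 - 1024 - 16384 = 3072 bytes of
// partition alignment padding and 512 - 92 = 420 bytes of header padding. The values in gpt.rs
// are authoritative; the figures here are only illustrative.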
68 // Keep all partitions 4k aligned for performance.
69 const PARTITION_SIZE_SHIFT: u8 = 12;
70 // Keep the disk size a multiple of 64k for crosvm's virtio_blk driver.
71 const DISK_SIZE_SHIFT: u8 = 16;
72
73 // From https://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs.
74 const LINUX_FILESYSTEM_GUID: Uuid = Uuid::from_u128(0x0FC63DAF_8483_4772_8E79_3D69D8477DE4);
75 const EFI_SYSTEM_PARTITION_GUID: Uuid = Uuid::from_u128(0xC12A7328_F81F_11D2_BA4B_00A0C93EC93B);
76
77 #[sorted]
78 #[derive(Error, Debug)]
79 pub enum Error {
80 #[error("failed to use underlying disk: \"{0}\"")]
81 DiskError(Box<crate::Error>),
82 #[error("duplicate GPT partition label \"{0}\"")]
83 DuplicatePartitionLabel(String),
84 #[error("failed to write GPT header: \"{0}\"")]
85 GptError(gpt::Error),
86 #[error("invalid magic header for composite disk format")]
87 InvalidMagicHeader,
88 #[error("invalid partition path {0:?}")]
89 InvalidPath(PathBuf),
90 #[error("failed to parse specification proto: \"{0}\"")]
91 InvalidProto(protobuf::Error),
92 #[error("invalid specification: \"{0}\"")]
93 InvalidSpecification(String),
94 #[error("no image files for partition {0:?}")]
95 NoImageFiles(PartitionInfo),
96 #[error("failed to open component file \"{1}\": \"{0}\"")]
97 OpenFile(io::Error, String),
98 #[error("failed to read specification: \"{0}\"")]
99 ReadSpecificationError(io::Error),
100 #[error("Read-write partition {0:?} size is not a multiple of {}.", 1 << PARTITION_SIZE_SHIFT)]
101 UnalignedReadWrite(PartitionInfo),
102 #[error("unknown version {0} in specification")]
103 UnknownVersion(u64),
104 #[error("unsupported component disk type \"{0:?}\"")]
105 UnsupportedComponent(ImageType),
106 #[error("failed to write composite disk header: \"{0}\"")]
107 WriteHeader(io::Error),
108 #[error("failed to write specification proto: \"{0}\"")]
109 WriteProto(protobuf::Error),
110 #[error("failed to write zero filler: \"{0}\"")]
111 WriteZeroFiller(io::Error),
112 }
113
114 impl From<gpt::Error> for Error {
    fn from(e: gpt::Error) -> Self {
116 Self::GptError(e)
117 }
118 }
119
120 pub type Result<T> = std::result::Result<T, Error>;
121
122 #[derive(Debug)]
123 struct ComponentDiskPart {
124 file: Box<dyn DiskFile>,
125 offset: u64,
126 length: u64,
127 needs_fsync: AtomicBool,
128 }
129
130 impl ComponentDiskPart {
    fn range(&self) -> Range<u64> {
132 self.offset..(self.offset + self.length)
133 }
134 }
135
136 /// Represents a composite virtual disk made out of multiple component files. This is described on
137 /// disk by a protocol buffer file that lists out the component file locations and their offsets
138 /// and lengths on the virtual disk. The spaces covered by the component disks must be contiguous
139 /// and not overlapping.
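///
/// For example, a composite built from a GPT header component, a single 4 KiB partition, and a
/// footer component is described by three component disks at offsets 0, GPT_BEGINNING_SIZE, and
/// GPT_BEGINNING_SIZE + 4096 respectively (see `create_composite_disk_success` in the tests for
/// a concrete spec).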
140 #[derive(Debug)]
141 pub struct CompositeDiskFile {
142 component_disks: Vec<ComponentDiskPart>,
143 // We keep the root composite file open so that the file lock is not dropped.
144 _disk_spec_file: File,
145 }
146
147 // TODO(b/271381851): implement `try_clone`. It allows virtio-blk to run multiple workers.
148 impl DiskFile for CompositeDiskFile {}
149
fn ranges_overlap(a: &Range<u64>, b: &Range<u64>) -> bool {
151 range_intersection(a, b).is_some()
152 }
153
fn range_intersection(a: &Range<u64>, b: &Range<u64>) -> Option<Range<u64>> {
155 let r = Range {
156 start: max(a.start, b.start),
157 end: min(a.end, b.end),
158 };
159 if r.is_empty() {
160 None
161 } else {
162 Some(r)
163 }
164 }
165
166 /// The version of the composite disk format supported by this implementation.
167 const COMPOSITE_DISK_VERSION: u64 = 2;
168
169 /// A magic string placed at the beginning of a composite disk file to identify it.
170 pub const CDISK_MAGIC: &str = "composite_disk\x1d";
171
172 impl CompositeDiskFile {
    fn new(mut disks: Vec<ComponentDiskPart>, disk_spec_file: File) -> Result<CompositeDiskFile> {
174 disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
175 for s in disks.windows(2) {
176 if s[0].offset == s[1].offset {
177 return Err(Error::InvalidSpecification(format!(
178 "Two disks at offset {}",
179 s[0].offset
180 )));
181 }
182 }
183 Ok(CompositeDiskFile {
184 component_disks: disks,
185 _disk_spec_file: disk_spec_file,
186 })
187 }
188
189 /// Set up a composite disk by reading the specification from a file. The file must consist of
190 /// the CDISK_MAGIC string followed by one binary instance of the CompositeDisk protocol
191 /// buffer. Returns an error if it could not read the file or if the specification was invalid.
    pub fn from_file(mut file: File, params: DiskFileParams) -> Result<CompositeDiskFile> {
193 file.seek(SeekFrom::Start(0))
194 .map_err(Error::ReadSpecificationError)?;
195 let mut magic_space = [0u8; CDISK_MAGIC.len()];
196 file.read_exact(&mut magic_space[..])
197 .map_err(Error::ReadSpecificationError)?;
198 if magic_space != CDISK_MAGIC.as_bytes() {
199 return Err(Error::InvalidMagicHeader);
200 }
201 let proto: cdisk_spec::CompositeDisk =
202 Message::parse_from_reader(&mut file).map_err(Error::InvalidProto)?;
203 if proto.version > COMPOSITE_DISK_VERSION {
204 return Err(Error::UnknownVersion(proto.version));
205 }
206 let mut disks: Vec<ComponentDiskPart> = proto
207 .component_disks
208 .iter()
209 .map(|disk| {
210 let writable = !params.is_read_only
211 && disk.read_write_capability
212 == cdisk_spec::ReadWriteCapability::READ_WRITE.into();
213 let component_path = PathBuf::from(&disk.file_path);
214 let path = if component_path.is_relative() || proto.version > 1 {
215 params.path.parent().unwrap().join(component_path)
216 } else {
217 component_path
218 };
219
                // Note that read-only parts of a composite disk should NOT be marked sparse,
                // as the action of marking them sparse is a write. This may seem a little
                // hacky, and it is; however:
                // (a) there is not a good way to pass sparseness parameters per composite
                //     disk part (the proto does not have fields for it).
                // (b) this override of sorts always matches the correct user intent.
226 Ok(ComponentDiskPart {
227 file: open_disk_file(DiskFileParams {
228 path: path.to_owned(),
229 is_read_only: !writable,
230 is_sparse_file: params.is_sparse_file && writable,
231 // TODO: Should pass `params.is_overlapped` through here. Needs testing.
232 is_overlapped: false,
233 is_direct: params.is_direct,
234 lock: params.lock,
235 depth: params.depth + 1,
236 })
237 .map_err(|e| Error::DiskError(Box::new(e)))?,
238 offset: disk.offset,
239 length: 0, // Assigned later
240 needs_fsync: AtomicBool::new(false),
241 })
242 })
243 .collect::<Result<Vec<ComponentDiskPart>>>()?;
244 disks.sort_by(|d1, d2| d1.offset.cmp(&d2.offset));
245 for i in 0..(disks.len() - 1) {
246 let length = disks[i + 1].offset - disks[i].offset;
247 if length == 0 {
248 let text = format!("Two disks at offset {}", disks[i].offset);
249 return Err(Error::InvalidSpecification(text));
250 }
251 if let Some(disk) = disks.get_mut(i) {
252 disk.length = length;
253 } else {
254 let text = format!("Unable to set disk length {}", length);
255 return Err(Error::InvalidSpecification(text));
256 }
257 }
258 if let Some(last_disk) = disks.last_mut() {
259 if proto.length <= last_disk.offset {
260 let text = format!(
261 "Full size of disk doesn't match last offset. {} <= {}",
262 proto.length, last_disk.offset
263 );
264 return Err(Error::InvalidSpecification(text));
265 }
266 last_disk.length = proto.length - last_disk.offset;
267 } else {
268 let text = format!("Unable to set last disk length to end at {}", proto.length);
269 return Err(Error::InvalidSpecification(text));
270 }
271
272 CompositeDiskFile::new(disks, file)
273 }
274
    fn length(&self) -> u64 {
276 if let Some(disk) = self.component_disks.last() {
277 disk.offset + disk.length
278 } else {
279 0
280 }
281 }
282
    fn disk_at_offset(&self, offset: u64) -> io::Result<&ComponentDiskPart> {
284 self.component_disks
285 .iter()
286 .find(|disk| disk.range().contains(&offset))
287 .ok_or(io::Error::new(
288 ErrorKind::InvalidData,
289 format!("no disk at offset {}", offset),
290 ))
291 }
292 }
293
294 impl DiskGetLen for CompositeDiskFile {
    fn get_len(&self) -> io::Result<u64> {
296 Ok(self.length())
297 }
298 }
299
300 impl FileSetLen for CompositeDiskFile {
    fn set_len(&self, _len: u64) -> io::Result<()> {
302 Err(io::Error::new(ErrorKind::Other, "unsupported operation"))
303 }
304 }
305
306 // Implements Read and Write targeting volatile storage for composite disks.
307 //
308 // Note that reads and writes will return early if crossing component disk boundaries.
309 // This is allowed by the read and write specifications, which only say read and write
310 // have to return how many bytes were actually read or written. Use read_exact_volatile
311 // or write_all_volatile to make sure all bytes are received/transmitted.
312 //
313 // If one of the component disks does a partial read or write, that also gets passed
314 // transparently to the parent.
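//
// For example, a 10-byte read at offset 95 of a composite whose first component covers bytes
// 0..100 returns only the 5 bytes up to the component boundary; the caller (or the
// read_exact_volatile/write_all_volatile helpers) must issue another call for the remainder.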
315 impl FileReadWriteAtVolatile for CompositeDiskFile {
    fn read_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
317 let cursor_location = offset;
318 let disk = self.disk_at_offset(cursor_location)?;
319 let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
320 let new_size = disk.offset + disk.length - cursor_location;
321 slice
322 .sub_slice(0, new_size as usize)
323 .map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
324 } else {
325 slice
326 };
327 disk.file
328 .read_at_volatile(subslice, cursor_location - disk.offset)
329 }
    fn write_at_volatile(&self, slice: VolatileSlice, offset: u64) -> io::Result<usize> {
331 let cursor_location = offset;
332 let disk = self.disk_at_offset(cursor_location)?;
333 let subslice = if cursor_location + slice.size() as u64 > disk.offset + disk.length {
334 let new_size = disk.offset + disk.length - cursor_location;
335 slice
336 .sub_slice(0, new_size as usize)
337 .map_err(|e| io::Error::new(ErrorKind::InvalidData, e.to_string()))?
338 } else {
339 slice
340 };
341
342 let bytes = disk
343 .file
344 .write_at_volatile(subslice, cursor_location - disk.offset)?;
345 disk.needs_fsync.store(true, Ordering::SeqCst);
346 Ok(bytes)
347 }
348 }
349
350 impl AsRawDescriptors for CompositeDiskFile {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
352 self.component_disks
353 .iter()
354 .flat_map(|d| d.file.as_raw_descriptors())
355 .collect()
356 }
357 }
358
359 struct AsyncComponentDiskPart {
360 file: Box<dyn AsyncDisk>,
361 offset: u64,
362 length: u64,
363 needs_fsync: AtomicBool,
364 }
365
366 pub struct AsyncCompositeDiskFile {
367 component_disks: Vec<AsyncComponentDiskPart>,
368 }
369
370 impl DiskGetLen for AsyncCompositeDiskFile {
    fn get_len(&self) -> io::Result<u64> {
372 Ok(self.length())
373 }
374 }
375
376 impl FileSetLen for AsyncCompositeDiskFile {
    fn set_len(&self, _len: u64) -> io::Result<()> {
378 Err(io::Error::new(ErrorKind::Other, "unsupported operation"))
379 }
380 }
381
382 impl FileAllocate for AsyncCompositeDiskFile {
    fn allocate(&self, offset: u64, length: u64) -> io::Result<()> {
384 let range = offset..(offset + length);
385 let disks = self
386 .component_disks
387 .iter()
388 .filter(|disk| ranges_overlap(&disk.range(), &range));
389 for disk in disks {
390 if let Some(intersection) = range_intersection(&range, &disk.range()) {
391 disk.file.allocate(
392 intersection.start - disk.offset,
393 intersection.end - intersection.start,
394 )?;
395 disk.needs_fsync.store(true, Ordering::SeqCst);
396 }
397 }
398 Ok(())
399 }
400 }
401
402 impl ToAsyncDisk for CompositeDiskFile {
    fn to_async_disk(self: Box<Self>, ex: &Executor) -> crate::Result<Box<dyn AsyncDisk>> {
404 Ok(Box::new(AsyncCompositeDiskFile {
405 component_disks: self
406 .component_disks
407 .into_iter()
408 .map(|disk| -> crate::Result<_> {
409 Ok(AsyncComponentDiskPart {
410 file: disk.file.to_async_disk(ex)?,
411 offset: disk.offset,
412 length: disk.length,
413 needs_fsync: disk.needs_fsync,
414 })
415 })
416 .collect::<crate::Result<Vec<_>>>()?,
417 }))
418 }
419 }
420
421 impl AsyncComponentDiskPart {
    fn range(&self) -> Range<u64> {
423 self.offset..(self.offset + self.length)
424 }
425
    fn set_needs_fsync(&self) {
427 self.needs_fsync.store(true, Ordering::SeqCst);
428 }
429 }
430
431 impl AsyncCompositeDiskFile {
    fn length(&self) -> u64 {
433 if let Some(disk) = self.component_disks.last() {
434 disk.offset + disk.length
435 } else {
436 0
437 }
438 }
439
    fn disk_at_offset(&self, offset: u64) -> io::Result<&AsyncComponentDiskPart> {
441 self.component_disks
442 .iter()
443 .find(|disk| disk.range().contains(&offset))
444 .ok_or(io::Error::new(
445 ErrorKind::InvalidData,
446 format!("no disk at offset {}", offset),
447 ))
448 }
449
    fn disks_in_range<'a>(&'a self, range: &Range<u64>) -> Vec<&'a AsyncComponentDiskPart> {
451 self.component_disks
452 .iter()
453 .filter(|disk| ranges_overlap(&disk.range(), range))
454 .collect()
455 }
456 }
457
458 #[async_trait(?Send)]
459 impl AsyncDisk for AsyncCompositeDiskFile {
    async fn flush(&self) -> crate::Result<()> {
461 futures::future::try_join_all(self.component_disks.iter().map(|c| c.file.flush())).await?;
462 Ok(())
463 }
464
    async fn fsync(&self) -> crate::Result<()> {
466 // TODO: handle the disks concurrently
467 for disk in self.component_disks.iter() {
468 if disk.needs_fsync.fetch_and(false, Ordering::SeqCst) {
469 if let Err(e) = disk.file.fsync().await {
470 disk.set_needs_fsync();
471 return Err(e);
472 }
473 }
474 }
475 Ok(())
476 }
477
    async fn fdatasync(&self) -> crate::Result<()> {
479 // AsyncCompositeDiskFile does not implement fdatasync for now. Fallback to fsync.
480 self.fsync().await
481 }
482
    async fn read_to_mem<'a>(
484 &'a self,
485 file_offset: u64,
486 mem: Arc<dyn BackingMemory + Send + Sync>,
487 mem_offsets: MemRegionIter<'a>,
488 ) -> crate::Result<usize> {
489 let disk = self
490 .disk_at_offset(file_offset)
491 .map_err(crate::Error::ReadingData)?;
492 let remaining_disk = disk.offset + disk.length - file_offset;
493 disk.file
494 .read_to_mem(
495 file_offset - disk.offset,
496 mem,
497 mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
498 )
499 .await
500 }
501
    async fn write_from_mem<'a>(
503 &'a self,
504 file_offset: u64,
505 mem: Arc<dyn BackingMemory + Send + Sync>,
506 mem_offsets: MemRegionIter<'a>,
507 ) -> crate::Result<usize> {
508 let disk = self
509 .disk_at_offset(file_offset)
510 .map_err(crate::Error::ReadingData)?;
511 let remaining_disk = disk.offset + disk.length - file_offset;
512 let n = disk
513 .file
514 .write_from_mem(
515 file_offset - disk.offset,
516 mem,
517 mem_offsets.take_bytes(remaining_disk.try_into().unwrap()),
518 )
519 .await?;
520 disk.set_needs_fsync();
521 Ok(n)
522 }
523
    async fn punch_hole(&self, file_offset: u64, length: u64) -> crate::Result<()> {
525 let range = file_offset..(file_offset + length);
526 let disks = self.disks_in_range(&range);
527 for disk in disks {
528 if let Some(intersection) = range_intersection(&range, &disk.range()) {
529 disk.file
530 .punch_hole(
531 intersection.start - disk.offset,
532 intersection.end - intersection.start,
533 )
534 .await?;
535 disk.set_needs_fsync();
536 }
537 }
538 Ok(())
539 }
540
    async fn write_zeroes_at(&self, file_offset: u64, length: u64) -> crate::Result<()> {
542 let range = file_offset..(file_offset + length);
543 let disks = self.disks_in_range(&range);
544 for disk in disks {
545 if let Some(intersection) = range_intersection(&range, &disk.range()) {
546 disk.file
547 .write_zeroes_at(
548 intersection.start - disk.offset,
549 intersection.end - intersection.start,
550 )
551 .await?;
552 disk.set_needs_fsync();
553 }
554 }
555 Ok(())
556 }
557 }
558
559 /// Information about a partition to create.
560 #[derive(Clone, Debug, Eq, PartialEq)]
561 pub struct PartitionInfo {
562 pub label: String,
563 pub path: PathBuf,
564 pub partition_type: ImagePartitionType,
565 pub writable: bool,
566 pub size: u64,
567 pub part_guid: Option<Uuid>,
568 }
569
570 /// Round `val` up to the next multiple of 2**`align_log`.
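///
/// For example, `align_to_power_of_2(4000, 12)` and `align_to_power_of_2(4096, 12)` both return
/// 4096, while `align_to_power_of_2(4097, 12)` returns 8192.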
fn align_to_power_of_2(val: u64, align_log: u8) -> u64 {
572 let align = 1 << align_log;
573 ((val + (align - 1)) / align) * align
574 }
575
576 impl PartitionInfo {
    fn aligned_size(&self) -> u64 {
578 align_to_power_of_2(self.size, PARTITION_SIZE_SHIFT)
579 }
580 }
581
582 /// The type of partition.
583 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
584 pub enum ImagePartitionType {
585 LinuxFilesystem,
586 EfiSystemPartition,
587 }
588
589 impl ImagePartitionType {
    fn guid(self) -> Uuid {
591 match self {
592 Self::LinuxFilesystem => LINUX_FILESYSTEM_GUID,
593 Self::EfiSystemPartition => EFI_SYSTEM_PARTITION_GUID,
594 }
595 }
596 }
597
598 /// Write protective MBR and primary GPT table.
fn write_beginning(
600 file: &mut impl Write,
601 disk_guid: Uuid,
602 partitions: &[u8],
603 partition_entries_crc32: u32,
604 secondary_table_offset: u64,
605 disk_size: u64,
606 ) -> Result<()> {
607 // Write the protective MBR to the first sector.
608 write_protective_mbr(file, disk_size)?;
609
610 // Write the GPT header, and pad out to the end of the sector.
611 write_gpt_header(
612 file,
613 disk_guid,
614 partition_entries_crc32,
615 secondary_table_offset,
616 false,
617 )?;
618 file.write_all(&[0; HEADER_PADDING_LENGTH])
619 .map_err(Error::WriteHeader)?;
620
621 // Write partition entries, including unused ones.
622 file.write_all(partitions).map_err(Error::WriteHeader)?;
623
624 // Write zeroes to align the first partition appropriately.
625 file.write_all(&[0; PARTITION_ALIGNMENT_SIZE])
626 .map_err(Error::WriteHeader)?;
627
628 Ok(())
629 }
630
631 /// Write secondary GPT table.
fn write_end(
633 file: &mut impl Write,
634 disk_guid: Uuid,
635 partitions: &[u8],
636 partition_entries_crc32: u32,
637 secondary_table_offset: u64,
638 disk_size: u64,
639 ) -> Result<()> {
640 // Write partition entries, including unused ones.
641 file.write_all(partitions).map_err(Error::WriteHeader)?;
642
643 // Write the GPT header, and pad out to the end of the sector.
644 write_gpt_header(
645 file,
646 disk_guid,
647 partition_entries_crc32,
648 secondary_table_offset,
649 true,
650 )?;
651 file.write_all(&[0; HEADER_PADDING_LENGTH])
652 .map_err(Error::WriteHeader)?;
653
654 // Pad out to the aligned disk size.
655 let used_disk_size = secondary_table_offset + GPT_END_SIZE;
656 let padding = disk_size - used_disk_size;
657 file.write_all(&vec![0; padding as usize])
658 .map_err(Error::WriteHeader)?;
659
660 Ok(())
661 }
662
663 /// Create the `GptPartitionEntry` for the given partition.
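///
/// For example (assuming the usual `SECTOR_SIZE` of 512): a 4096-byte partition placed at
/// `offset` 0x5000 gets `first_lba` 40 and `last_lba` 47, i.e. eight 512-byte sectors.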
fn create_gpt_entry(partition: &PartitionInfo, offset: u64) -> GptPartitionEntry {
665 let mut partition_name: Vec<u16> = partition.label.encode_utf16().collect();
666 partition_name.resize(36, 0);
667
668 GptPartitionEntry {
669 partition_type_guid: partition.partition_type.guid(),
670 unique_partition_guid: partition.part_guid.unwrap_or(Uuid::new_v4()),
671 first_lba: offset / SECTOR_SIZE,
672 last_lba: (offset + partition.aligned_size()) / SECTOR_SIZE - 1,
673 attributes: 0,
674 partition_name: partition_name.try_into().unwrap(),
675 }
676 }
677
678 /// Create one or more `ComponentDisk` proto messages for the given partition.
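///
/// For example, a read-only partition of 4000 bytes becomes two component disks: the partition
/// image itself at `offset`, plus the shared zero filler at `offset + 4000` covering the
/// remaining 96 bytes up to the 4 KiB alignment boundary (see `create_composite_disk_success`
/// in the tests below).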
fn create_component_disks(
680 partition: &PartitionInfo,
681 offset: u64,
682 zero_filler_path: &str,
683 ) -> Result<Vec<ComponentDisk>> {
684 let aligned_size = partition.aligned_size();
685
686 let mut component_disks = vec![ComponentDisk {
687 offset,
688 file_path: partition
689 .path
690 .to_str()
691 .ok_or_else(|| Error::InvalidPath(partition.path.to_owned()))?
692 .to_string(),
693 read_write_capability: if partition.writable {
694 ReadWriteCapability::READ_WRITE.into()
695 } else {
696 ReadWriteCapability::READ_ONLY.into()
697 },
698 ..ComponentDisk::new()
699 }];
700
701 if partition.size != aligned_size {
702 if partition.writable {
703 return Err(Error::UnalignedReadWrite(partition.to_owned()));
704 } else {
            // Fill in the gap by reusing the shared zero filler file; its size is
            // 1 << PARTITION_SIZE_SHIFT (4 KiB), which is at least as large as any
            // alignment gap.
707 component_disks.push(ComponentDisk {
708 offset: offset + partition.size,
709 file_path: zero_filler_path.to_owned(),
710 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
711 ..ComponentDisk::new()
712 });
713 }
714 }
715
716 Ok(component_disks)
717 }
718
719 /// Create a new composite disk image containing the given partitions, and write it out to the given
720 /// files.
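///
/// The resulting layout is: the header file covering the first `GPT_BEGINNING_SIZE` bytes
/// (protective MBR plus primary GPT), each partition in the order given and padded to a 4 KiB
/// multiple, then the footer file holding the secondary GPT table, with the total disk length
/// rounded up to a 64 KiB multiple. See `create_composite_disk_success` in the tests for a
/// concrete example.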
pub fn create_composite_disk(
722 partitions: &[PartitionInfo],
723 zero_filler_path: &Path,
724 header_path: &Path,
725 header_file: &mut File,
726 footer_path: &Path,
727 footer_file: &mut File,
728 output_composite: &mut File,
729 ) -> Result<()> {
730 let zero_filler_path = zero_filler_path
731 .to_str()
732 .ok_or_else(|| Error::InvalidPath(zero_filler_path.to_owned()))?
733 .to_string();
734 let header_path = header_path
735 .to_str()
736 .ok_or_else(|| Error::InvalidPath(header_path.to_owned()))?
737 .to_string();
738 let footer_path = footer_path
739 .to_str()
740 .ok_or_else(|| Error::InvalidPath(footer_path.to_owned()))?
741 .to_string();
742
743 let mut composite_proto = CompositeDisk::new();
744 composite_proto.version = COMPOSITE_DISK_VERSION;
745 composite_proto.component_disks.push(ComponentDisk {
746 file_path: header_path,
747 offset: 0,
748 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
749 ..ComponentDisk::new()
750 });
751
752 // Write partitions to a temporary buffer so that we can calculate the CRC, and construct the
753 // ComponentDisk proto messages at the same time.
754 let mut partitions_buffer =
755 [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
756 let mut writer: &mut [u8] = &mut partitions_buffer;
757 let mut next_disk_offset = GPT_BEGINNING_SIZE;
758 let mut labels = HashSet::with_capacity(partitions.len());
759 for partition in partitions {
760 let gpt_entry = create_gpt_entry(partition, next_disk_offset);
761 if !labels.insert(gpt_entry.partition_name) {
762 return Err(Error::DuplicatePartitionLabel(partition.label.clone()));
763 }
764 gpt_entry.write_bytes(&mut writer)?;
765
766 for component_disk in
767 create_component_disks(partition, next_disk_offset, &zero_filler_path)?
768 {
769 composite_proto.component_disks.push(component_disk);
770 }
771
772 next_disk_offset += partition.aligned_size();
773 }
774 let secondary_table_offset = next_disk_offset;
775 let disk_size = align_to_power_of_2(secondary_table_offset + GPT_END_SIZE, DISK_SIZE_SHIFT);
776
777 composite_proto.component_disks.push(ComponentDisk {
778 file_path: footer_path,
779 offset: secondary_table_offset,
780 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
781 ..ComponentDisk::new()
782 });
783
784 // Calculate CRC32 of partition entries.
785 let mut hasher = Hasher::new();
786 hasher.update(&partitions_buffer);
787 let partition_entries_crc32 = hasher.finalize();
788
789 let disk_guid = Uuid::new_v4();
790 write_beginning(
791 header_file,
792 disk_guid,
793 &partitions_buffer,
794 partition_entries_crc32,
795 secondary_table_offset,
796 disk_size,
797 )?;
798 write_end(
799 footer_file,
800 disk_guid,
801 &partitions_buffer,
802 partition_entries_crc32,
803 secondary_table_offset,
804 disk_size,
805 )?;
806
807 composite_proto.length = disk_size;
808 output_composite
809 .write_all(CDISK_MAGIC.as_bytes())
810 .map_err(Error::WriteHeader)?;
811 composite_proto
812 .write_to_writer(output_composite)
813 .map_err(Error::WriteProto)?;
814
815 Ok(())
816 }
817
818 /// Create a zero filler file which can be used to fill the gaps between partition files.
/// The filler is 1 << PARTITION_SIZE_SHIFT (4 KiB) bytes, which is large enough to fill any
/// alignment gap.
pub fn create_zero_filler<P: AsRef<Path>>(zero_filler_path: P) -> Result<()> {
821 let f = OpenOptions::new()
822 .create(true)
823 .read(true)
824 .write(true)
825 .truncate(true)
826 .open(zero_filler_path.as_ref())
827 .map_err(Error::WriteZeroFiller)?;
828 f.set_len(1 << PARTITION_SIZE_SHIFT)
829 .map_err(Error::WriteZeroFiller)
830 }
831
832 #[cfg(test)]
833 mod tests {
834 use std::fs::OpenOptions;
835 use std::io::Write;
836 use std::matches;
837
838 use base::AsRawDescriptor;
839 use tempfile::tempfile;
840
841 use super::*;
842
    fn new_from_components(disks: Vec<ComponentDiskPart>) -> Result<CompositeDiskFile> {
844 CompositeDiskFile::new(disks, tempfile().unwrap())
845 }
846
847 #[test]
    fn block_duplicate_offset_disks() {
849 let file1 = tempfile().unwrap();
850 let file2 = tempfile().unwrap();
851 let disk_part1 = ComponentDiskPart {
852 file: Box::new(file1),
853 offset: 0,
854 length: 100,
855 needs_fsync: AtomicBool::new(false),
856 };
857 let disk_part2 = ComponentDiskPart {
858 file: Box::new(file2),
859 offset: 0,
860 length: 100,
861 needs_fsync: AtomicBool::new(false),
862 };
863 assert!(new_from_components(vec![disk_part1, disk_part2]).is_err());
864 }
865
866 #[test]
    fn get_len() {
868 let file1 = tempfile().unwrap();
869 let file2 = tempfile().unwrap();
870 let disk_part1 = ComponentDiskPart {
871 file: Box::new(file1),
872 offset: 0,
873 length: 100,
874 needs_fsync: AtomicBool::new(false),
875 };
876 let disk_part2 = ComponentDiskPart {
877 file: Box::new(file2),
878 offset: 100,
879 length: 100,
880 needs_fsync: AtomicBool::new(false),
881 };
882 let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
883 let len = composite.get_len().unwrap();
884 assert_eq!(len, 200);
885 }
886
887 #[test]
    fn async_get_len() {
889 let file1 = tempfile().unwrap();
890 let file2 = tempfile().unwrap();
891 let disk_part1 = ComponentDiskPart {
892 file: Box::new(file1),
893 offset: 0,
894 length: 100,
895 needs_fsync: AtomicBool::new(false),
896 };
897 let disk_part2 = ComponentDiskPart {
898 file: Box::new(file2),
899 offset: 100,
900 length: 100,
901 needs_fsync: AtomicBool::new(false),
902 };
903 let composite = new_from_components(vec![disk_part1, disk_part2]).unwrap();
904
905 let ex = Executor::new().unwrap();
906 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
907 let len = composite.get_len().unwrap();
908 assert_eq!(len, 200);
909 }
910
911 #[test]
    fn single_file_passthrough() {
913 let file = tempfile().unwrap();
914 let disk_part = ComponentDiskPart {
915 file: Box::new(file),
916 offset: 0,
917 length: 100,
918 needs_fsync: AtomicBool::new(false),
919 };
920 let composite = new_from_components(vec![disk_part]).unwrap();
921 let mut input_memory = [55u8; 5];
922 let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
923 composite
924 .write_all_at_volatile(input_volatile_memory, 0)
925 .unwrap();
926 let mut output_memory = [0u8; 5];
927 let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
928 composite
929 .read_exact_at_volatile(output_volatile_memory, 0)
930 .unwrap();
931 assert_eq!(input_memory, output_memory);
932 }
933
934 #[test]
    fn async_single_file_passthrough() {
936 let file = tempfile().unwrap();
937 let disk_part = ComponentDiskPart {
938 file: Box::new(file),
939 offset: 0,
940 length: 100,
941 needs_fsync: AtomicBool::new(false),
942 };
943 let composite = new_from_components(vec![disk_part]).unwrap();
944 let ex = Executor::new().unwrap();
945 ex.run_until(async {
946 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
947 let expected = [55u8; 5];
948 assert_eq!(
949 composite.write_double_buffered(0, &expected).await.unwrap(),
950 5
951 );
952 let mut buf = [0u8; 5];
953 assert_eq!(
954 composite
955 .read_double_buffered(0, &mut buf[..])
956 .await
957 .unwrap(),
958 5
959 );
960 assert_eq!(buf, expected);
961 })
962 .unwrap();
963 }
964
965 #[test]
    fn triple_file_descriptors() {
967 let file1 = tempfile().unwrap();
968 let file2 = tempfile().unwrap();
969 let file3 = tempfile().unwrap();
970 let mut in_descriptors = vec![
971 file1.as_raw_descriptor(),
972 file2.as_raw_descriptor(),
973 file3.as_raw_descriptor(),
974 ];
975 in_descriptors.sort_unstable();
976 let disk_part1 = ComponentDiskPart {
977 file: Box::new(file1),
978 offset: 0,
979 length: 100,
980 needs_fsync: AtomicBool::new(false),
981 };
982 let disk_part2 = ComponentDiskPart {
983 file: Box::new(file2),
984 offset: 100,
985 length: 100,
986 needs_fsync: AtomicBool::new(false),
987 };
988 let disk_part3 = ComponentDiskPart {
989 file: Box::new(file3),
990 offset: 200,
991 length: 100,
992 needs_fsync: AtomicBool::new(false),
993 };
994 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
995 let mut out_descriptors = composite.as_raw_descriptors();
996 out_descriptors.sort_unstable();
997 assert_eq!(in_descriptors, out_descriptors);
998 }
999
1000 #[test]
    fn triple_file_passthrough() {
1002 let file1 = tempfile().unwrap();
1003 let file2 = tempfile().unwrap();
1004 let file3 = tempfile().unwrap();
1005 let disk_part1 = ComponentDiskPart {
1006 file: Box::new(file1),
1007 offset: 0,
1008 length: 100,
1009 needs_fsync: AtomicBool::new(false),
1010 };
1011 let disk_part2 = ComponentDiskPart {
1012 file: Box::new(file2),
1013 offset: 100,
1014 length: 100,
1015 needs_fsync: AtomicBool::new(false),
1016 };
1017 let disk_part3 = ComponentDiskPart {
1018 file: Box::new(file3),
1019 offset: 200,
1020 length: 100,
1021 needs_fsync: AtomicBool::new(false),
1022 };
1023 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1024 let mut input_memory = [55u8; 200];
1025 let input_volatile_memory = VolatileSlice::new(&mut input_memory[..]);
1026 composite
1027 .write_all_at_volatile(input_volatile_memory, 50)
1028 .unwrap();
1029 let mut output_memory = [0u8; 200];
1030 let output_volatile_memory = VolatileSlice::new(&mut output_memory[..]);
1031 composite
1032 .read_exact_at_volatile(output_volatile_memory, 50)
1033 .unwrap();
1034 assert!(input_memory.iter().eq(output_memory.iter()));
1035 }
1036
1037 #[test]
    fn async_triple_file_passthrough() {
1039 let file1 = tempfile().unwrap();
1040 let file2 = tempfile().unwrap();
1041 let file3 = tempfile().unwrap();
1042 let disk_part1 = ComponentDiskPart {
1043 file: Box::new(file1),
1044 offset: 0,
1045 length: 100,
1046 needs_fsync: AtomicBool::new(false),
1047 };
1048 let disk_part2 = ComponentDiskPart {
1049 file: Box::new(file2),
1050 offset: 100,
1051 length: 100,
1052 needs_fsync: AtomicBool::new(false),
1053 };
1054 let disk_part3 = ComponentDiskPart {
1055 file: Box::new(file3),
1056 offset: 200,
1057 length: 100,
1058 needs_fsync: AtomicBool::new(false),
1059 };
1060 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1061 let ex = Executor::new().unwrap();
1062 ex.run_until(async {
1063 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1064
1065 let expected = [55u8; 200];
1066 assert_eq!(
1067 composite.write_double_buffered(0, &expected).await.unwrap(),
1068 100
1069 );
1070 assert_eq!(
1071 composite
1072 .write_double_buffered(100, &expected[100..])
1073 .await
1074 .unwrap(),
1075 100
1076 );
1077
1078 let mut buf = [0u8; 200];
1079 assert_eq!(
1080 composite
1081 .read_double_buffered(0, &mut buf[..])
1082 .await
1083 .unwrap(),
1084 100
1085 );
1086 assert_eq!(
1087 composite
1088 .read_double_buffered(100, &mut buf[100..])
1089 .await
1090 .unwrap(),
1091 100
1092 );
1093 assert_eq!(buf, expected);
1094 })
1095 .unwrap();
1096 }
1097
1098 #[test]
    fn async_triple_file_punch_hole() {
1100 let file1 = tempfile().unwrap();
1101 let file2 = tempfile().unwrap();
1102 let file3 = tempfile().unwrap();
1103 let disk_part1 = ComponentDiskPart {
1104 file: Box::new(file1),
1105 offset: 0,
1106 length: 100,
1107 needs_fsync: AtomicBool::new(false),
1108 };
1109 let disk_part2 = ComponentDiskPart {
1110 file: Box::new(file2),
1111 offset: 100,
1112 length: 100,
1113 needs_fsync: AtomicBool::new(false),
1114 };
1115 let disk_part3 = ComponentDiskPart {
1116 file: Box::new(file3),
1117 offset: 200,
1118 length: 100,
1119 needs_fsync: AtomicBool::new(false),
1120 };
1121 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1122 let ex = Executor::new().unwrap();
1123 ex.run_until(async {
1124 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1125
1126 let input = [55u8; 300];
1127 assert_eq!(
1128 composite.write_double_buffered(0, &input).await.unwrap(),
1129 100
1130 );
1131 assert_eq!(
1132 composite
1133 .write_double_buffered(100, &input[100..])
1134 .await
1135 .unwrap(),
1136 100
1137 );
1138 assert_eq!(
1139 composite
1140 .write_double_buffered(200, &input[200..])
1141 .await
1142 .unwrap(),
1143 100
1144 );
1145
1146 composite.punch_hole(50, 200).await.unwrap();
1147
1148 let mut buf = [0u8; 300];
1149 assert_eq!(
1150 composite
1151 .read_double_buffered(0, &mut buf[..])
1152 .await
1153 .unwrap(),
1154 100
1155 );
1156 assert_eq!(
1157 composite
1158 .read_double_buffered(100, &mut buf[100..])
1159 .await
1160 .unwrap(),
1161 100
1162 );
1163 assert_eq!(
1164 composite
1165 .read_double_buffered(200, &mut buf[200..])
1166 .await
1167 .unwrap(),
1168 100
1169 );
1170
1171 let mut expected = input;
1172 expected[50..250].iter_mut().for_each(|x| *x = 0);
1173 assert_eq!(buf, expected);
1174 })
1175 .unwrap();
1176 }
1177
1178 #[test]
    fn async_triple_file_write_zeroes() {
1180 let file1 = tempfile().unwrap();
1181 let file2 = tempfile().unwrap();
1182 let file3 = tempfile().unwrap();
1183 let disk_part1 = ComponentDiskPart {
1184 file: Box::new(file1),
1185 offset: 0,
1186 length: 100,
1187 needs_fsync: AtomicBool::new(false),
1188 };
1189 let disk_part2 = ComponentDiskPart {
1190 file: Box::new(file2),
1191 offset: 100,
1192 length: 100,
1193 needs_fsync: AtomicBool::new(false),
1194 };
1195 let disk_part3 = ComponentDiskPart {
1196 file: Box::new(file3),
1197 offset: 200,
1198 length: 100,
1199 needs_fsync: AtomicBool::new(false),
1200 };
1201 let composite = new_from_components(vec![disk_part1, disk_part2, disk_part3]).unwrap();
1202 let ex = Executor::new().unwrap();
1203 ex.run_until(async {
1204 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1205
1206 let input = [55u8; 300];
1207 assert_eq!(
1208 composite.write_double_buffered(0, &input).await.unwrap(),
1209 100
1210 );
1211 assert_eq!(
1212 composite
1213 .write_double_buffered(100, &input[100..])
1214 .await
1215 .unwrap(),
1216 100
1217 );
1218 assert_eq!(
1219 composite
1220 .write_double_buffered(200, &input[200..])
1221 .await
1222 .unwrap(),
1223 100
1224 );
1225
1226 composite.write_zeroes_at(50, 200).await.unwrap();
1227
1228 let mut buf = [0u8; 300];
1229 assert_eq!(
1230 composite
1231 .read_double_buffered(0, &mut buf[..])
1232 .await
1233 .unwrap(),
1234 100
1235 );
1236 assert_eq!(
1237 composite
1238 .read_double_buffered(100, &mut buf[100..])
1239 .await
1240 .unwrap(),
1241 100
1242 );
1243 assert_eq!(
1244 composite
1245 .read_double_buffered(200, &mut buf[200..])
1246 .await
1247 .unwrap(),
1248 100
1249 );
1250
1251 let mut expected = input;
1252 expected[50..250].iter_mut().for_each(|x| *x = 0);
1253 assert_eq!(buf, expected);
1254 })
1255 .unwrap();
1256 }
1257
1258 // TODO: fsync on a RO file is legal, this test doesn't work as expected. Consider using a mock
1259 // DiskFile to detect the fsync calls.
1260 #[test]
    fn async_fsync_skips_unchanged_parts() {
1262 let mut rw_file = tempfile().unwrap();
1263 rw_file.write_all(&[0u8; 100]).unwrap();
1264 rw_file.seek(SeekFrom::Start(0)).unwrap();
1265 let mut ro_disk_image = tempfile::NamedTempFile::new().unwrap();
1266 ro_disk_image.write_all(&[0u8; 100]).unwrap();
1267 let ro_file = OpenOptions::new()
1268 .read(true)
1269 .open(ro_disk_image.path())
1270 .unwrap();
1271
1272 let rw_part = ComponentDiskPart {
1273 file: Box::new(rw_file),
1274 offset: 0,
1275 length: 100,
1276 needs_fsync: AtomicBool::new(false),
1277 };
1278 let ro_part = ComponentDiskPart {
1279 file: Box::new(ro_file),
1280 offset: 100,
1281 length: 100,
1282 needs_fsync: AtomicBool::new(false),
1283 };
1284 let composite = new_from_components(vec![rw_part, ro_part]).unwrap();
1285 let ex = Executor::new().unwrap();
1286 ex.run_until(async {
1287 let composite = Box::new(composite).to_async_disk(&ex).unwrap();
1288
1289 // Write to the RW part so that some fsync operation will occur.
1290 composite.write_zeroes_at(0, 20).await.unwrap();
1291
1292 // This is the test's assert. fsyncing should NOT touch a read-only disk part. On
1293 // Windows, this would be an error.
1294 composite.fsync().await.expect(
1295 "Failed to fsync composite disk. \
1296 This can happen if the disk writable state is wrong.",
1297 );
1298 })
1299 .unwrap();
1300 }
1301
1302 #[test]
    fn beginning_size() {
1304 let mut buffer = vec![];
1305 let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
1306 let disk_size = 1000 * SECTOR_SIZE;
1307 write_beginning(
1308 &mut buffer,
1309 Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
1310 &partitions,
1311 42,
1312 disk_size - GPT_END_SIZE,
1313 disk_size,
1314 )
1315 .unwrap();
1316
1317 assert_eq!(buffer.len(), GPT_BEGINNING_SIZE as usize);
1318 }
1319
1320 #[test]
    fn end_size() {
1322 let mut buffer = vec![];
1323 let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
1324 let disk_size = 1000 * SECTOR_SIZE;
1325 write_end(
1326 &mut buffer,
1327 Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
1328 &partitions,
1329 42,
1330 disk_size - GPT_END_SIZE,
1331 disk_size,
1332 )
1333 .unwrap();
1334
1335 assert_eq!(buffer.len(), GPT_END_SIZE as usize);
1336 }
1337
1338 #[test]
    fn end_size_with_padding() {
1340 let mut buffer = vec![];
1341 let partitions = [0u8; GPT_NUM_PARTITIONS as usize * GPT_PARTITION_ENTRY_SIZE as usize];
1342 let disk_size = 1000 * SECTOR_SIZE;
1343 let padding = 3 * SECTOR_SIZE;
1344 write_end(
1345 &mut buffer,
1346 Uuid::from_u128(0x12345678_1234_5678_abcd_12345678abcd),
1347 &partitions,
1348 42,
1349 disk_size - GPT_END_SIZE - padding,
1350 disk_size,
1351 )
1352 .unwrap();
1353
1354 assert_eq!(buffer.len(), GPT_END_SIZE as usize + padding as usize);
1355 }
1356
1357 /// Creates a composite disk image with no partitions.
1358 #[test]
    fn create_composite_disk_empty() {
1360 let mut header_image = tempfile().unwrap();
1361 let mut footer_image = tempfile().unwrap();
1362 let mut composite_image = tempfile().unwrap();
1363
1364 create_composite_disk(
1365 &[],
1366 Path::new("/zero_filler.img"),
1367 Path::new("/header_path.img"),
1368 &mut header_image,
1369 Path::new("/footer_path.img"),
1370 &mut footer_image,
1371 &mut composite_image,
1372 )
1373 .unwrap();
1374 }
1375
1376 /// Creates a composite disk image with two partitions.
1377 #[test]
    fn create_composite_disk_success() {
1379 let mut header_image = tempfile().unwrap();
1380 let mut footer_image = tempfile().unwrap();
1381 let mut composite_image = tempfile().unwrap();
1382
1383 create_composite_disk(
1384 &[
1385 PartitionInfo {
1386 label: "partition1".to_string(),
1387 path: "/partition1.img".to_string().into(),
1388 partition_type: ImagePartitionType::LinuxFilesystem,
1389 writable: false,
1390 // Needs small amount of padding.
1391 size: 4000,
1392 part_guid: None,
1393 },
1394 PartitionInfo {
1395 label: "partition2".to_string(),
1396 path: "/partition2.img".to_string().into(),
1397 partition_type: ImagePartitionType::LinuxFilesystem,
1398 writable: true,
1399 // Needs no padding.
1400 size: 4096,
1401 part_guid: Some(Uuid::from_u128(0x4049C8DC_6C2B_C740_A95A_BDAA629D4378)),
1402 },
1403 ],
1404 Path::new("/zero_filler.img"),
1405 Path::new("/header_path.img"),
1406 &mut header_image,
1407 Path::new("/footer_path.img"),
1408 &mut footer_image,
1409 &mut composite_image,
1410 )
1411 .unwrap();
1412
1413 // Check magic.
1414 composite_image.rewind().unwrap();
1415 let mut magic_space = [0u8; CDISK_MAGIC.len()];
1416 composite_image.read_exact(&mut magic_space[..]).unwrap();
1417 assert_eq!(magic_space, CDISK_MAGIC.as_bytes());
1418 // Check proto.
1419 let proto = CompositeDisk::parse_from_reader(&mut composite_image).unwrap();
1420 assert_eq!(
1421 proto,
1422 CompositeDisk {
1423 version: 2,
1424 component_disks: vec![
1425 ComponentDisk {
1426 file_path: "/header_path.img".to_string(),
1427 offset: 0,
1428 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1429 ..ComponentDisk::new()
1430 },
1431 ComponentDisk {
1432 file_path: "/partition1.img".to_string(),
1433 offset: 0x5000, // GPT_BEGINNING_SIZE,
1434 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1435 ..ComponentDisk::new()
1436 },
1437 ComponentDisk {
1438 file_path: "/zero_filler.img".to_string(),
1439 offset: 0x5fa0, // GPT_BEGINNING_SIZE + 4000,
1440 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1441 ..ComponentDisk::new()
1442 },
1443 ComponentDisk {
1444 file_path: "/partition2.img".to_string(),
1445 offset: 0x6000, // GPT_BEGINNING_SIZE + 4096,
1446 read_write_capability: ReadWriteCapability::READ_WRITE.into(),
1447 ..ComponentDisk::new()
1448 },
1449 ComponentDisk {
1450 file_path: "/footer_path.img".to_string(),
1451 offset: 0x7000, // GPT_BEGINNING_SIZE + 4096 + 4096,
1452 read_write_capability: ReadWriteCapability::READ_ONLY.into(),
1453 ..ComponentDisk::new()
1454 },
1455 ],
1456 length: 0x10000, // 1 << DISK_SIZE_SHIFT
1457 ..CompositeDisk::new()
1458 }
1459 );
1460 }
1461
1462 /// Attempts to create a composite disk image with two partitions with the same label.
1463 #[test]
    fn create_composite_disk_duplicate_label() {
1465 let mut header_image = tempfile().unwrap();
1466 let mut footer_image = tempfile().unwrap();
1467 let mut composite_image = tempfile().unwrap();
1468
1469 let result = create_composite_disk(
1470 &[
1471 PartitionInfo {
1472 label: "label".to_string(),
1473 path: "/partition1.img".to_string().into(),
1474 partition_type: ImagePartitionType::LinuxFilesystem,
1475 writable: false,
1476 size: 0,
1477 part_guid: None,
1478 },
1479 PartitionInfo {
1480 label: "label".to_string(),
1481 path: "/partition2.img".to_string().into(),
1482 partition_type: ImagePartitionType::LinuxFilesystem,
1483 writable: true,
1484 size: 0,
1485 part_guid: None,
1486 },
1487 ],
1488 Path::new("/zero_filler.img"),
1489 Path::new("/header_path.img"),
1490 &mut header_image,
1491 Path::new("/footer_path.img"),
1492 &mut footer_image,
1493 &mut composite_image,
1494 );
1495 assert!(matches!(result, Err(Error::DuplicatePartitionLabel(label)) if label == "label"));
1496 }
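
    // A small additional sketch exercising the pure layout helpers above; the expected values
    // follow directly from their definitions (4 KiB alignment and half-open ranges).
    #[test]
    fn layout_helper_arithmetic() {
        assert_eq!(align_to_power_of_2(4000, PARTITION_SIZE_SHIFT), 4096);
        assert_eq!(align_to_power_of_2(4096, PARTITION_SIZE_SHIFT), 4096);
        assert_eq!(align_to_power_of_2(4097, PARTITION_SIZE_SHIFT), 8192);

        assert!(ranges_overlap(&(0..100), &(50..150)));
        assert!(!ranges_overlap(&(0..50), &(50..100)));
        assert_eq!(range_intersection(&(0..100), &(50..150)), Some(50..100));
    }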
1497 }
1498