1 // Copyright 2024 The ChromiumOS Authors 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 //! Defines a struct to represent an ext2 filesystem and implements methods to create 6 // a filesystem in memory. 7 8 use std::collections::BTreeMap; 9 use std::ffi::OsStr; 10 use std::ffi::OsString; 11 use std::fs::DirEntry; 12 use std::fs::File; 13 use std::os::unix::ffi::OsStrExt; 14 use std::path::Path; 15 16 use anyhow::anyhow; 17 use anyhow::bail; 18 use anyhow::Context; 19 use anyhow::Result; 20 use base::info; 21 use zerocopy::AsBytes; 22 use zerocopy::FromBytes; 23 use zerocopy::FromZeroes; 24 25 use crate::arena::Arena; 26 use crate::arena::BlockId; 27 use crate::blockgroup::BlockGroupDescriptor; 28 use crate::blockgroup::GroupMetaData; 29 use crate::blockgroup::BLOCK_SIZE; 30 use crate::builder::Builder; 31 use crate::inode::Inode; 32 use crate::inode::InodeBlock; 33 use crate::inode::InodeBlocksCount; 34 use crate::inode::InodeNum; 35 use crate::inode::InodeType; 36 use crate::superblock::SuperBlock; 37 use crate::xattr::InlineXattrs; 38 39 #[repr(C)] 40 #[derive(Copy, Clone, FromZeroes, FromBytes, AsBytes, Debug)] 41 struct DirEntryRaw { 42 inode: u32, 43 rec_len: u16, 44 name_len: u8, 45 file_type: u8, 46 } 47 48 struct DirEntryWithName<'a> { 49 de: &'a mut DirEntryRaw, 50 name: OsString, 51 } 52 53 impl<'a> std::fmt::Debug for DirEntryWithName<'a> { fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result54 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 55 f.debug_struct("DirEntry") 56 .field("de", &self.de) 57 .field("name", &self.name) 58 .finish() 59 } 60 } 61 62 impl<'a> DirEntryWithName<'a> { new( arena: &'a Arena<'a>, inode: InodeNum, typ: InodeType, name_str: &OsStr, dblock: &mut DirEntryBlock, ) -> Result<Self>63 fn new( 64 arena: &'a Arena<'a>, 65 inode: InodeNum, 66 typ: InodeType, 67 name_str: &OsStr, 68 dblock: &mut DirEntryBlock, 69 ) -> Result<Self> { 70 
let cs = name_str.as_bytes(); 71 let name_len = cs.len(); 72 let aligned_name_len = name_len 73 .checked_next_multiple_of(4) 74 .expect("name length must be 4-byte aligned"); 75 76 // rec_len = |inode| + |file_type| + |name_len| + |rec_len| + name + padding 77 // = 4 + 1 + 1 + 2 + |name| + padding 78 // = 8 + |name| + padding 79 // The padding is inserted because the name is 4-byte aligned. 80 let rec_len = 8 + aligned_name_len as u16; 81 82 let de = arena.allocate(dblock.block_id, dblock.offset)?; 83 *de = DirEntryRaw { 84 inode: inode.into(), 85 rec_len, 86 name_len: name_len as u8, 87 file_type: typ.into_dir_entry_file_type(), 88 }; 89 dblock.offset += std::mem::size_of::<DirEntryRaw>(); 90 91 let name_slice = arena.allocate_slice(dblock.block_id, dblock.offset, aligned_name_len)?; 92 dblock.offset += aligned_name_len; 93 name_slice[..cs.len()].copy_from_slice(cs); 94 95 if dblock.entries.is_empty() { 96 de.rec_len = BLOCK_SIZE as u16; 97 } else { 98 let last = dblock 99 .entries 100 .last_mut() 101 .expect("parent_dir must not be empty"); 102 let last_rec_len = last.de.rec_len; 103 last.de.rec_len = (8 + last.name.as_os_str().as_bytes().len() as u16) 104 .checked_next_multiple_of(4) 105 .expect("overflow to calculate rec_len"); 106 de.rec_len = last_rec_len - last.de.rec_len; 107 } 108 109 Ok(Self { 110 de, 111 name: name_str.into(), 112 }) 113 } 114 } 115 116 #[derive(Debug)] 117 struct DirEntryBlock<'a> { 118 block_id: BlockId, 119 offset: usize, 120 entries: Vec<DirEntryWithName<'a>>, 121 } 122 123 impl DirEntryBlock<'_> { has_enough_space(&self, name: &OsStr) -> bool124 fn has_enough_space(&self, name: &OsStr) -> bool { 125 let dir_entry_size = std::mem::size_of::<DirEntryRaw>(); 126 let aligned_name_len = name 127 .as_bytes() 128 .len() 129 .checked_next_multiple_of(4) 130 .expect("length must be < 256 bytes so it must not overflow"); 131 self.offset + dir_entry_size + aligned_name_len <= BLOCK_SIZE 132 } 133 } 134 135 /// A struct to represent an ext2 
/// A struct to represent an ext2 filesystem.
pub(crate) struct Ext2<'a> {
    // Filesystem-wide superblock (free block/inode counts, group geometry).
    sb: &'a mut SuperBlock,
    // Index of the block group blocks are currently allocated from.
    cur_block_group: usize,
    // Index of the block group inodes are currently allocated from.
    cur_inode_table: usize,

    group_metadata: Vec<GroupMetaData<'a>>,

    // Directory-entry blocks for each directory inode, in allocation order.
    dir_entries: BTreeMap<InodeNum, Vec<DirEntryBlock<'a>>>,
}

impl<'a> Ext2<'a> {
    /// Creates a new ext2 filesystem in `arena`, pre-populated with the root
    /// directory (inode 2) and `/lost+found`.
    pub(crate) fn new(builder: &Builder, arena: &'a Arena<'a>) -> Result<Self> {
        let sb = SuperBlock::new(arena, builder)?;
        let mut group_metadata = vec![];
        for i in 0..sb.num_groups() {
            group_metadata.push(GroupMetaData::new(arena, sb, i)?);
        }

        let mut ext2 = Ext2 {
            sb,
            cur_block_group: 0,
            cur_inode_table: 0,
            group_metadata,
            dir_entries: BTreeMap::new(),
        };

        // Add rootdir. Inode 2 is reserved for the root directory in ext2.
        let root_inode = InodeNum::new(2)?;
        let root_xattr = match &builder.root_dir {
            Some(dir) => Some(InlineXattrs::from_path(dir)?),
            None => None,
        };
        ext2.add_reserved_dir(arena, root_inode, root_inode, OsStr::new("/"), root_xattr)?;
        let lost_found_inode = ext2.allocate_inode()?;
        ext2.add_reserved_dir(
            arena,
            lost_found_inode,
            root_inode,
            OsStr::new("lost+found"),
            None,
        )?;

        Ok(ext2)
    }

    /// Allocates the next free inode number, updating the group's inode bitmap
    /// and the free-inode counters.
    fn allocate_inode(&mut self) -> Result<InodeNum> {
        if self.sb.free_inodes_count == 0 {
            bail!(
                "no free inodes: run out of s_inodes_count={}",
                self.sb.inodes_count
            );
        }

        // Advance to the next block group once the current one is exhausted.
        if self.group_metadata[self.cur_inode_table]
            .group_desc
            .free_inodes_count
            == 0
        {
            self.cur_inode_table += 1;
        }

        let gm = &mut self.group_metadata[self.cur_inode_table];
        let alloc_inode = InodeNum::new(gm.first_free_inode)?;
        // (alloc_inode - 1) because inode is 1-indexed.
        gm.inode_bitmap
            .set(
                (usize::from(alloc_inode) - 1) % self.sb.inodes_per_group as usize,
                true,
            )
            .context("failed to set inode bitmap")?;

        gm.first_free_inode += 1;
        gm.group_desc.free_inodes_count -= 1;
        self.sb.free_inodes_count -= 1;
        Ok(alloc_inode)
    }

    /// Allocates a single block.
    fn allocate_block(&mut self) -> Result<BlockId> {
        self.allocate_contiguous_blocks(1).map(|v| v[0][0])
    }

    /// Allocates `n` blocks in as few contiguous runs as possible.
    /// Each inner `Vec<BlockId>` is one physically contiguous run; a run never
    /// crosses a block-group boundary.
    fn allocate_contiguous_blocks(&mut self, n: u16) -> Result<Vec<Vec<BlockId>>> {
        if n == 0 {
            bail!("n must be positive");
        }
        if self.sb.free_blocks_count < n as u32 {
            bail!(
                "no free blocks: run out of free_blocks_count={} < {n}",
                self.sb.free_blocks_count
            );
        }

        let mut contig_blocks = vec![];
        let mut remaining = n;
        while remaining > 0 {
            // Take as many blocks as the current group can still provide.
            let alloc_block_num = std::cmp::min(
                remaining,
                self.group_metadata[self.cur_block_group]
                    .group_desc
                    .free_blocks_count,
            ) as u32;

            let gm = &mut self.group_metadata[self.cur_block_group];
            let alloc_blocks = (gm.first_free_block..gm.first_free_block + alloc_block_num)
                .map(BlockId::from)
                .collect();
            gm.first_free_block += alloc_block_num;
            gm.group_desc.free_blocks_count -= alloc_block_num as u16;
            self.sb.free_blocks_count -= alloc_block_num;
            for &b in &alloc_blocks {
                // The bitmap index is relative to the group's first block.
                let index = u32::from(b) as usize
                    - self.cur_block_group * self.sb.blocks_per_group as usize;
                gm.block_bitmap
                    .set(index, true)
                    .with_context(|| format!("failed to set block_bitmap at {index}"))?;
            }
            remaining -= alloc_block_num as u16;
            if self.group_metadata[self.cur_block_group]
                .group_desc
                .free_blocks_count
                == 0
            {
                self.cur_block_group += 1;
            }
            contig_blocks.push(alloc_blocks);
        }

        Ok(contig_blocks)
    }

    /// Returns the index of the block group that `inode` belongs to.
    fn group_num_for_inode(&self, inode: InodeNum) -> usize {
        inode.to_table_index() / self.sb.inodes_per_group as usize
    }

    /// Looks up a mutable reference to an inode already registered via
    /// `add_inode`.
    fn get_inode_mut(&mut self, num: InodeNum) -> Result<&mut &'a mut Inode> {
        let group_id = self.group_num_for_inode(num);
        self.group_metadata[group_id]
            .inode_table
            .get_mut(&num)
            .ok_or_else(|| anyhow!("{:?} not found", num))
    }

    /// Adds a directory entry `name` -> `inode` under the directory `parent`,
    /// allocating the parent's first (or an additional) directory-entry block
    /// when needed.
    fn allocate_dir_entry(
        &mut self,
        arena: &'a Arena<'a>,
        parent: InodeNum,
        inode: InodeNum,
        typ: InodeType,
        name: &OsStr,
    ) -> Result<()> {
        if name.is_empty() {
            bail!("directory name must not be empty");
        } else if name.len() > 255 {
            bail!("name length must not exceed 255: {:?}", name);
        }

        // Disable false-positive `clippy::map_entry`.
        // https://github.com/rust-lang/rust-clippy/issues/9470
        #[allow(clippy::map_entry)]
        if !self.dir_entries.contains_key(&parent) {
            // First entry in this directory: allocate its first data block.
            let block_id = self.allocate_block()?;
            let inode = self.get_inode_mut(parent)?;
            inode.block.set_direct_blocks(&[block_id])?;
            inode.blocks = InodeBlocksCount::from_bytes_len(BLOCK_SIZE as u32);
            self.dir_entries.insert(
                parent,
                vec![DirEntryBlock {
                    block_id,
                    offset: 0,
                    entries: Vec::new(),
                }],
            );
        }

        // Allocates a new block for dir entries if needed.
        if !self
            .dir_entries
            .get(&parent)
            .ok_or_else(|| anyhow!("parent {:?} not found for {:?}", parent, inode))?
            .last()
            .expect("directory entries must not be empty")
            .has_enough_space(name)
        {
            let idx = self.dir_entries.get(&parent).unwrap().len();
            let block_id = self.allocate_block()?;
            let parent_inode = self.get_inode_mut(parent)?;
            parent_inode.block.set_block_id(idx, &block_id)?;
            parent_inode.blocks.add(BLOCK_SIZE as u32);
            parent_inode.size += BLOCK_SIZE as u32;
            self.dir_entries
                .get_mut(&parent)
                .unwrap()
                .push(DirEntryBlock {
                    block_id,
                    offset: 0,
                    entries: Vec::new(),
                });
        }

        // A subdirectory's ".." entry is a link back to the parent.
        if typ == InodeType::Directory {
            let parent = self.get_inode_mut(parent)?;
            parent.links_count += 1;
        }

        let parent_dir = self
            .dir_entries
            .get_mut(&parent)
            .ok_or_else(|| anyhow!("parent {:?} not found for {:?}", parent, inode))?
            .last_mut()
            .expect("directory entries must not be empty");

        let dir_entry = DirEntryWithName::new(arena, inode, typ, name, parent_dir)?;

        parent_dir.entries.push(dir_entry);

        Ok(())
    }

    /// Registers `inode` in its block group's inode table and inode bitmap.
    fn add_inode(&mut self, num: InodeNum, inode: &'a mut Inode) -> Result<()> {
        let typ = inode.typ().ok_or_else(|| anyhow!("unknown inode type"))?;
        let group_id = self.group_num_for_inode(num);
        let gm = &mut self.group_metadata[group_id];
        if gm.inode_table.contains_key(&num) {
            bail!("inode {:?} already exists", &num);
        }

        if typ == InodeType::Directory {
            gm.group_desc.used_dirs_count += 1;
        }

        gm.inode_table.insert(num, inode);
        let inode_index = num.to_table_index() % self.sb.inodes_per_group as usize;
        gm.inode_bitmap
            .set(inode_index, true)
            .with_context(|| format!("failed to set inode bitmap at {}", num.to_table_index()))?;

        Ok(())
    }
add_reserved_dir( &mut self, arena: &'a Arena<'a>, inode_num: InodeNum, parent_inode: InodeNum, name: &OsStr, xattr: Option<InlineXattrs>, ) -> Result<()>377 fn add_reserved_dir( 378 &mut self, 379 arena: &'a Arena<'a>, 380 inode_num: InodeNum, 381 parent_inode: InodeNum, 382 name: &OsStr, 383 xattr: Option<InlineXattrs>, 384 ) -> Result<()> { 385 let group_id = self.group_num_for_inode(inode_num); 386 let inode = Inode::new( 387 arena, 388 &mut self.group_metadata[group_id], 389 inode_num, 390 InodeType::Directory, 391 BLOCK_SIZE as u32, 392 xattr, 393 )?; 394 self.add_inode(inode_num, inode)?; 395 396 self.allocate_dir_entry( 397 arena, 398 inode_num, 399 inode_num, 400 InodeType::Directory, 401 OsStr::new("."), 402 )?; 403 self.allocate_dir_entry( 404 arena, 405 inode_num, 406 parent_inode, 407 InodeType::Directory, 408 OsStr::new(".."), 409 )?; 410 411 if inode_num != parent_inode { 412 self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Directory, name)?; 413 } 414 415 Ok(()) 416 } 417 add_dir( &mut self, arena: &'a Arena<'a>, inode_num: InodeNum, parent_inode: InodeNum, path: &Path, ) -> Result<()>418 fn add_dir( 419 &mut self, 420 arena: &'a Arena<'a>, 421 inode_num: InodeNum, 422 parent_inode: InodeNum, 423 path: &Path, 424 ) -> Result<()> { 425 let group_id = self.group_num_for_inode(inode_num); 426 427 let xattr = InlineXattrs::from_path(path)?; 428 let inode = Inode::from_metadata( 429 arena, 430 &mut self.group_metadata[group_id], 431 inode_num, 432 &std::fs::metadata(path)?, 433 BLOCK_SIZE as u32, 434 0, 435 InodeBlocksCount::from_bytes_len(0), 436 InodeBlock::default(), 437 Some(xattr), 438 )?; 439 440 self.add_inode(inode_num, inode)?; 441 442 self.allocate_dir_entry( 443 arena, 444 inode_num, 445 inode_num, 446 InodeType::Directory, 447 OsStr::new("."), 448 )?; 449 self.allocate_dir_entry( 450 arena, 451 inode_num, 452 parent_inode, 453 InodeType::Directory, 454 OsStr::new(".."), 455 )?; 456 457 if inode_num != parent_inode { 458 let 
name = path 459 .file_name() 460 .ok_or_else(|| anyhow!("failed to get directory name"))?; 461 self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Directory, name)?; 462 } 463 464 Ok(()) 465 } 466 467 /// Registers a file to be mmaped to the memory region. 468 /// This function just reserves a region for mmap() on `arena` and doesn't call mmap(). 469 /// It's `arena`'s owner's responsibility to call mmap() for the registered files at the end. register_mmap_file( &mut self, arena: &'a Arena<'a>, block_num: usize, file: &File, file_size: usize, mut file_offset: usize, ) -> Result<(Vec<BlockId>, usize)>470 fn register_mmap_file( 471 &mut self, 472 arena: &'a Arena<'a>, 473 block_num: usize, 474 file: &File, 475 file_size: usize, 476 mut file_offset: usize, 477 ) -> Result<(Vec<BlockId>, usize)> { 478 let contig_blocks = self.allocate_contiguous_blocks(block_num as u16)?; 479 480 let mut remaining = std::cmp::min(file_size - file_offset, block_num * BLOCK_SIZE); 481 let mut written = 0; 482 for blocks in &contig_blocks { 483 if remaining == 0 { 484 panic!("remaining == 0. This is a bug"); 485 } 486 let length = std::cmp::min(remaining, BLOCK_SIZE * blocks.len()); 487 let start_block = blocks[0]; 488 let mem_offset = u32::from(start_block) as usize * BLOCK_SIZE; 489 // Reserve the region in arena to prevent from overwriting metadata. 
490 arena 491 .reserve_for_mmap( 492 mem_offset, 493 length, 494 file.try_clone().context("failed to clone file")?, 495 file_offset, 496 ) 497 .context("mmap for direct_block is already occupied")?; 498 remaining -= length; 499 written += length; 500 file_offset += length; 501 } 502 Ok((contig_blocks.concat(), written)) 503 } 504 fill_indirect_block( &mut self, arena: &'a Arena<'a>, indirect_table: BlockId, file: &File, file_size: usize, file_offset: usize, ) -> Result<usize>505 fn fill_indirect_block( 506 &mut self, 507 arena: &'a Arena<'a>, 508 indirect_table: BlockId, 509 file: &File, 510 file_size: usize, 511 file_offset: usize, 512 ) -> Result<usize> { 513 // We use a block as a table of indirect blocks. 514 // So, the maximum number of blocks supported by single indirect blocks is limited by the 515 // maximum number of entries in one block, which is (BLOCK_SIZE / 4) where 4 is the size of 516 // int. 517 let max_num_blocks = BLOCK_SIZE / 4; 518 let max_data_len = max_num_blocks * BLOCK_SIZE; 519 520 let length = std::cmp::min(file_size - file_offset, max_data_len); 521 let block_num = length.div_ceil(BLOCK_SIZE); 522 523 let (allocated_blocks, length) = self 524 .register_mmap_file(arena, block_num, file, file_size, file_offset) 525 .context("failed to reserve mmap regions on indirect block")?; 526 527 let slice = arena.allocate_slice(indirect_table, 0, 4 * block_num)?; 528 slice.copy_from_slice(allocated_blocks.as_bytes()); 529 530 Ok(length) 531 } 532 add_file( &mut self, arena: &'a Arena<'a>, parent_inode: InodeNum, path: &Path, ) -> Result<()>533 fn add_file( 534 &mut self, 535 arena: &'a Arena<'a>, 536 parent_inode: InodeNum, 537 path: &Path, 538 ) -> Result<()> { 539 let inode_num = self.allocate_inode()?; 540 541 let name = path 542 .file_name() 543 .ok_or_else(|| anyhow!("failed to get directory name"))?; 544 let file = File::open(path)?; 545 let file_size = file.metadata()?.len() as usize; 546 let mut block = InodeBlock::default(); 547 548 let mut 
written = 0; 549 let mut used_blocks = 0; 550 551 if file_size > 0 { 552 let block_num = std::cmp::min( 553 file_size.div_ceil(BLOCK_SIZE), 554 InodeBlock::NUM_DIRECT_BLOCKS, 555 ); 556 let (allocated_blocks, len) = self 557 .register_mmap_file(arena, block_num, &file, file_size, 0) 558 .context("failed to reserve mmap regions on direct block")?; 559 560 block.set_direct_blocks(&allocated_blocks)?; 561 written += len; 562 used_blocks += block_num; 563 } 564 565 // Indirect data block 566 if written < file_size { 567 let indirect_table = self.allocate_block()?; 568 block.set_indirect_block_table(&indirect_table)?; 569 used_blocks += 1; 570 571 let length = 572 self.fill_indirect_block(arena, indirect_table, &file, file_size, written)?; 573 written += length; 574 used_blocks += length.div_ceil(BLOCK_SIZE); 575 } 576 577 // Double-indirect data block 578 // Supporting double-indirect data block allows storing ~4GB files if 4GB block size is 579 // used. 580 if written < file_size { 581 let d_indirect_table = self.allocate_block()?; 582 block.set_double_indirect_block_table(&d_indirect_table)?; 583 used_blocks += 1; 584 585 let mut indirect_blocks: Vec<BlockId> = vec![]; 586 // Iterate (BLOCK_SIZE / 4) times, as each block id is 4-byte. 
587 for _ in 0..BLOCK_SIZE / 4 { 588 if written >= file_size { 589 break; 590 } 591 let indirect_table = self.allocate_block()?; 592 indirect_blocks.push(indirect_table); 593 used_blocks += 1; 594 595 let length = self 596 .fill_indirect_block(arena, indirect_table, &file, file_size, written) 597 .context("failed to indirect block for doubly-indirect table")?; 598 written += length; 599 used_blocks += length.div_ceil(BLOCK_SIZE); 600 } 601 602 let d_table = arena.allocate_slice(d_indirect_table, 0, indirect_blocks.len() * 4)?; 603 d_table.copy_from_slice(indirect_blocks.as_bytes()); 604 } 605 606 if written != file_size { 607 unimplemented!("Triple-indirect block is not supported"); 608 } 609 610 let blocks = InodeBlocksCount::from_bytes_len((used_blocks * BLOCK_SIZE) as u32); 611 let group_id = self.group_num_for_inode(inode_num); 612 let size = file_size as u32; 613 614 let xattr = InlineXattrs::from_path(path)?; 615 let inode = Inode::from_metadata( 616 arena, 617 &mut self.group_metadata[group_id], 618 inode_num, 619 &std::fs::metadata(path)?, 620 size, 621 1, 622 blocks, 623 block, 624 Some(xattr), 625 )?; 626 627 self.add_inode(inode_num, inode)?; 628 self.allocate_dir_entry(arena, parent_inode, inode_num, InodeType::Regular, name)?; 629 630 Ok(()) 631 } 632 add_symlink( &mut self, arena: &'a Arena<'a>, parent: InodeNum, entry: &DirEntry, ) -> Result<()>633 fn add_symlink( 634 &mut self, 635 arena: &'a Arena<'a>, 636 parent: InodeNum, 637 entry: &DirEntry, 638 ) -> Result<()> { 639 let link = entry.path(); 640 let dst_path = std::fs::read_link(&link)?; 641 let dst = dst_path 642 .to_str() 643 .context("failed to convert symlink destination to str")?; 644 645 if dst.len() >= InodeBlock::max_inline_symlink_len() { 646 return self.add_long_symlink(arena, parent, &link, dst); 647 } 648 649 let inode_num = self.allocate_inode()?; 650 let mut block = InodeBlock::default(); 651 block.set_inline_symlink(dst)?; 652 let group_id = self.group_num_for_inode(inode_num); 
653 let xattr = InlineXattrs::from_path(&link)?; 654 let inode = Inode::from_metadata( 655 arena, 656 &mut self.group_metadata[group_id], 657 inode_num, 658 &std::fs::symlink_metadata(&link)?, 659 dst.len() as u32, 660 1, //links_count, 661 InodeBlocksCount::from_bytes_len(0), 662 block, 663 Some(xattr), 664 )?; 665 self.add_inode(inode_num, inode)?; 666 667 let link_name = link.file_name().context("failed to get symlink name")?; 668 self.allocate_dir_entry(arena, parent, inode_num, InodeType::Symlink, link_name)?; 669 670 Ok(()) 671 } 672 add_long_symlink( &mut self, arena: &'a Arena<'a>, parent: InodeNum, link: &Path, dst: &str, ) -> Result<()>673 fn add_long_symlink( 674 &mut self, 675 arena: &'a Arena<'a>, 676 parent: InodeNum, 677 link: &Path, 678 dst: &str, 679 ) -> Result<()> { 680 let dst_len = dst.len(); 681 if dst_len > BLOCK_SIZE { 682 bail!("symlink longer than block size: {:?}", dst); 683 } 684 685 // Copy symlink's destination to the block. 686 let symlink_block = self.allocate_block()?; 687 let buf = arena.allocate_slice(symlink_block, 0, dst_len)?; 688 buf.copy_from_slice(dst.as_bytes()); 689 690 let inode_num = self.allocate_inode()?; 691 let mut block = InodeBlock::default(); 692 block.set_direct_blocks(&[symlink_block])?; 693 694 let group_id = self.group_num_for_inode(inode_num); 695 let xattr = InlineXattrs::from_path(link)?; 696 let inode = Inode::from_metadata( 697 arena, 698 &mut self.group_metadata[group_id], 699 inode_num, 700 &std::fs::symlink_metadata(link)?, 701 dst_len as u32, 702 1, //links_count, 703 InodeBlocksCount::from_bytes_len(BLOCK_SIZE as u32), 704 block, 705 Some(xattr), 706 )?; 707 self.add_inode(inode_num, inode)?; 708 709 let link_name = link.file_name().context("failed to get symlink name")?; 710 self.allocate_dir_entry(arena, parent, inode_num, InodeType::Symlink, link_name)?; 711 712 Ok(()) 713 } 714 715 /// Walks through `src_dir` and copies directories and files to the new file system. 
copy_dirtree<P: AsRef<Path>>( &mut self, arena: &'a Arena<'a>, src_dir: P, ) -> Result<()>716 pub(crate) fn copy_dirtree<P: AsRef<Path>>( 717 &mut self, 718 arena: &'a Arena<'a>, 719 src_dir: P, 720 ) -> Result<()> { 721 // Update the root directory's metadata with the metadata of `src_dir`. 722 let root_inode_num = InodeNum::new(2).expect("2 is a valid inode number"); 723 let group_id = self.group_num_for_inode(root_inode_num); 724 let gm = &mut self.group_metadata[group_id]; 725 let inode: &mut &mut Inode = gm 726 .inode_table 727 .get_mut(&root_inode_num) 728 .expect("root dir is not stored"); 729 let metadata = src_dir 730 .as_ref() 731 .metadata() 732 .with_context(|| format!("failed to get metadata of {:?}", src_dir.as_ref()))?; 733 inode.update_metadata(&metadata); 734 735 self.copy_dirtree_rec(arena, InodeNum(2), src_dir) 736 } 737 copy_dirtree_rec<P: AsRef<Path>>( &mut self, arena: &'a Arena<'a>, parent_inode: InodeNum, src_dir: P, ) -> Result<()>738 fn copy_dirtree_rec<P: AsRef<Path>>( 739 &mut self, 740 arena: &'a Arena<'a>, 741 parent_inode: InodeNum, 742 src_dir: P, 743 ) -> Result<()> { 744 for entry in std::fs::read_dir(&src_dir)? { 745 let entry = entry?; 746 let ftype = entry.file_type()?; 747 if ftype.is_dir() { 748 // Since we creates `/lost+found` on the root directory, ignore the existing one. 
749 if parent_inode.0 == 2 && entry.path().file_name() == Some(OsStr::new("lost+found")) 750 { 751 info!("ext2: Ignore the existing /lost+found directory"); 752 continue; 753 } 754 let inode = self.allocate_inode()?; 755 self.add_dir(arena, inode, parent_inode, &entry.path()) 756 .with_context(|| { 757 format!( 758 "failed to add directory {:?} as inode={:?}", 759 entry.path(), 760 inode 761 ) 762 })?; 763 self.copy_dirtree_rec(arena, inode, entry.path())?; 764 } else if ftype.is_file() { 765 self.add_file(arena, parent_inode, &entry.path()) 766 .with_context(|| { 767 format!( 768 "failed to add file {:?} in inode={:?}", 769 entry.path(), 770 parent_inode 771 ) 772 })?; 773 } else if ftype.is_symlink() { 774 self.add_symlink(arena, parent_inode, &entry)?; 775 } else { 776 bail!("unknown file type {:?} for {:?}", ftype, entry.file_name()); 777 } 778 } 779 780 Ok(()) 781 } 782 copy_backup_metadata(self, arena: &'a Arena<'a>) -> Result<()>783 pub(crate) fn copy_backup_metadata(self, arena: &'a Arena<'a>) -> Result<()> { 784 // Copy superblock and group_metadata to every block group 785 for i in 1..self.sb.num_groups() as usize { 786 let super_block_id = BlockId::from(self.sb.blocks_per_group * i as u32); 787 let bg_desc_block_id = BlockId::from(u32::from(super_block_id) + 1); 788 self.sb.block_group_nr = i as u16; 789 arena.write_to_mem(super_block_id, 0, self.sb)?; 790 let mut offset = 0; 791 for gm in &self.group_metadata { 792 arena.write_to_mem(bg_desc_block_id, offset, gm.group_desc)?; 793 offset += std::mem::size_of::<BlockGroupDescriptor>(); 794 } 795 } 796 Ok(()) 797 } 798 } 799