// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Defines an arena allocator backed by `base::MemoryMapping`.

use std::cell::RefCell;
use std::collections::BTreeSet;
use std::fs::File;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::MappedRegion;
use base::MemoryMapping;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Region {
    start: usize,
    len: usize,
}

/// Manages a set of non-overlapping regions.
#[derive(Default)]
struct RegionManager(BTreeSet<Region>);

impl RegionManager {
    fn allocate(&mut self, start: usize, len: usize) -> Result<()> {
        // Allocation needs to fail if there exists a region that overlaps with [start, start+len).
        // A region r overlaps with the half-open interval [start, start+len) if and only if:
        //   r.start < (start+len) && start < (r.start+r.len)
        //
        // So, we first find the last region where r.start < (start+len) holds; since regions are
        // disjoint and sorted, only that region can overlap with the new one.
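        // For example, an existing region {start: 10, len: 5} (covering [10, 15)) overlaps a
        // request for [12, 17) because 10 < 17 && 12 < 15, so that allocation fails. A request
        // for [15, 20) satisfies 10 < 20 but not 15 < 15, so it does not overlap; it is merely
        // adjacent and gets merged below.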
        let left = self
            .0
            .range(
                ..Region {
                    start: start + len,
                    len: 0,
                },
            )
            .next_back()
            .copied();

        // New region to be added.
        let new = match left {
            None => Region { start, len },
            Some(r) => {
                if start < r.start + r.len {
                    bail!(
                        "range overlaps: existing: {:?}, new: {:?}",
                        left,
                        Region { start, len }
                    );
                }

                // If `r` and the new region are adjacent, merge them;
                // otherwise just use the new region as-is.
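                // For example, merging an existing {start: 0, len: 5} with a new allocation at
                // [5, 10) yields a single {start: 0, len: 10}.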
                if start == r.start + r.len {
                    let new = Region {
                        start: r.start,
                        len: r.len + len,
                    };
                    self.0.remove(&r);
                    new
                } else {
                    Region { start, len }
                }
            }
        };

        // If there exists a region that starts from `new.start + new.len`,
        // it should be merged with `new`.
        let right = self
            .0
            .range(
                Region {
                    start: new.start + new.len,
                    len: 0,
                }..,
            )
            .next()
            .copied();
        match right {
            Some(r) if r.start == new.start + new.len => {
                // merge and insert
                let merged = Region {
                    start: new.start,
                    len: new.len + r.len,
                };
                self.0.remove(&r);
                self.0.insert(merged);
            }
            Some(_) | None => {
                // just insert
                self.0.insert(new);
            }
        }

        Ok(())
    }

    #[cfg(test)]
    fn to_vec(&self) -> Vec<&Region> {
        self.0.iter().collect()
    }
}

#[test]
fn test_region_manager() {
    let mut rm: RegionManager = Default::default();

    rm.allocate(0, 5).unwrap();
    assert_eq!(rm.to_vec(), vec![&Region { start: 0, len: 5 }]);
    rm.allocate(10, 5).unwrap();
    rm.allocate(15, 5).unwrap(); // will be merged into the previous one
    assert_eq!(
        rm.to_vec(),
        vec![&Region { start: 0, len: 5 }, &Region { start: 10, len: 10 }]
    );
    rm.allocate(3, 5).unwrap_err(); // overlaps with [0, 5); must fail
    rm.allocate(8, 5).unwrap_err(); // overlaps with [10, 20); must fail

    rm.allocate(25, 5).unwrap();
    assert_eq!(
        rm.to_vec(),
        vec![
            &Region { start: 0, len: 5 },
            &Region { start: 10, len: 10 },
            &Region { start: 25, len: 5 }
        ]
    );
    rm.allocate(5, 5).unwrap(); // will be merged with the existing regions on both sides
    assert_eq!(
        rm.to_vec(),
        vec![&Region { start: 0, len: 20 }, &Region { start: 25, len: 5 }]
    );
    rm.allocate(20, 5).unwrap();
    assert_eq!(rm.to_vec(), vec![&Region { start: 0, len: 30 }]);
}

/// Represents an ID of a disk block.
#[derive(Debug, Clone, Copy, AsBytes)]
#[repr(C)]
pub struct BlockId(u32);

impl From<u32> for BlockId {
    fn from(value: u32) -> Self {
        BlockId(value)
    }
}

impl From<BlockId> for u32 {
    fn from(value: BlockId) -> Self {
        value.0
    }
}

impl BlockId {
    pub fn as_bytes(&self) -> &[u8] {
        self.0.as_bytes()
    }
}

/// Information on how to mmap a host file to ext2 blocks.
pub struct FileMappingInfo {
    /// Offset in the memory that a file is mapped to.
    pub mem_offset: usize,
    /// The file to be mmap'd.
    pub file: File,
    /// The length of the mapping.
    pub length: usize,
    /// Offset in the file to start the mapping.
    pub file_offset: usize,
}

/// Memory arena backed by `base::MemoryMapping`.
///
/// This struct takes a mutable reference to the memory mapping, so nothing else can modify the
/// mapped region while the arena is alive.
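///
/// A minimal usage sketch (`MemoryMappingBuilder` and the sizes here are illustrative
/// assumptions, not part of this module):
///
/// ```ignore
/// let mut mem = MemoryMappingBuilder::new(4096 * 16).build()?;
/// let arena = Arena::new(4096, &mut mem)?;
/// // Reserve the first 8 bytes of block 1 and write a value there.
/// arena.write_to_mem(BlockId::from(1), 0, &42u64)?;
/// // Hand back the deferred mmap requests to the caller.
/// let mappings = arena.into_mapping_info();
/// ```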
pub struct Arena<'a> {
    mem: &'a mut MemoryMapping,
    block_size: usize,
    /// A set of non-overlapping allocated regions.
    /// Use `RefCell` for interior mutability because the mutability of `RegionManager` should be
    /// independent of the mutability of the memory mapping.
    regions: RefCell<RegionManager>,

    mappings: RefCell<Vec<FileMappingInfo>>,
}

impl<'a> Arena<'a> {
    /// Creates a new arena with the given block size, backed by `base::MemoryMapping`.
    pub fn new(block_size: usize, mem: &'a mut MemoryMapping) -> Result<Self> {
        Ok(Self {
            mem,
            block_size,
            regions: Default::default(),
            mappings: Default::default(),
        })
    }

    /// A helper function to mark a region as reserved.
    fn reserve(&self, mem_offset: usize, len: usize) -> Result<()> {
        let mem_end = mem_offset.checked_add(len).context("mem_end overflow")?;

        if mem_end > self.mem.size() {
            bail!(
                "out of memory region: {mem_offset} + {len} > {}",
                self.mem.size()
            );
        }

        self.regions.borrow_mut().allocate(mem_offset, len)?;

        Ok(())
    }

    /// Reserves a region for mmap and stores the mmap information.
    /// Note that `Arena` never calls mmap() itself. Instead, the owner of the `Arena` instance
    /// must call `into_mapping_info()` to retrieve the mapping information and perform the mmap
    /// later.
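    ///
    /// A sketch of the intended flow (the file name and offsets are illustrative assumptions):
    ///
    /// ```ignore
    /// let file = File::open("data.bin")?;
    /// // Record that `file` should back the first 4096 bytes of the arena.
    /// arena.reserve_for_mmap(0, 4096, file, 0)?;
    /// // Later, the owner consumes the arena and performs the actual mmap calls.
    /// for info in arena.into_mapping_info() {
    ///     // mmap `info.file` at `info.mem_offset` for `info.length` bytes,
    ///     // starting at `info.file_offset` in the file.
    /// }
    /// ```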
    pub fn reserve_for_mmap(
        &self,
        mem_offset: usize,
        length: usize,
        file: File,
        file_offset: usize,
    ) -> Result<()> {
        self.reserve(mem_offset, length)?;
        self.mappings.borrow_mut().push(FileMappingInfo {
            mem_offset,
            length,
            file: file.try_clone()?,
            file_offset,
        });

        Ok(())
    }

    /// Allocates a new slice in the anonymous memory region.
    /// `Arena` guarantees that this area does not overlap with any other allocated region.
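    ///
    /// A minimal sketch (the block size and offsets are illustrative assumptions):
    ///
    /// ```ignore
    /// // With a block size of 4096, this reserves bytes [8192, 8200) of the arena.
    /// let slice = arena.allocate_slice(BlockId::from(2), 0, 8)?;
    /// slice.copy_from_slice(&42u64.to_le_bytes());
    /// ```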
    pub fn allocate_slice(
        &self,
        block: BlockId,
        block_offset: usize,
        len: usize,
    ) -> Result<&'a mut [u8]> {
        let offset = u32::from(block) as usize * self.block_size + block_offset;
        self.reserve(offset, len)?;

        let new_addr = (self.mem.as_ptr() as usize)
            .checked_add(offset)
            .context("address overflow")?;

        // SAFETY: the memory region [new_addr, new_addr+len) is guaranteed to be valid:
        // `reserve` succeeded, so the range lies within the mapping and overlaps no other
        // allocation.
        let slice = unsafe { std::slice::from_raw_parts_mut(new_addr as *mut u8, len) };
        Ok(slice)
    }

    /// Allocates a new region for a value of type `T`.
    pub fn allocate<T: AsBytes + FromBytes + Sized>(
        &self,
        block: BlockId,
        block_offset: usize,
    ) -> Result<&'a mut T> {
        let slice = self.allocate_slice(block, block_offset, std::mem::size_of::<T>())?;
        T::mut_from(slice).ok_or_else(|| anyhow!("failed to interpret"))
    }

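    /// Writes `value` at the given block and offset, reserving the region it occupies.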
    pub fn write_to_mem<T: AsBytes + FromBytes + Sized>(
        &self,
        block_id: BlockId,
        block_offset: usize,
        value: &T,
    ) -> Result<()> {
        let slice = self.allocate_slice(block_id, block_offset, std::mem::size_of::<T>())?;
        slice.copy_from_slice(value.as_bytes());
        Ok(())
    }

    /// Consumes the `Arena` and retrieves the stored mmap information.
    pub fn into_mapping_info(self) -> Vec<FileMappingInfo> {
        self.mappings.take()
    }
}