// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileSlice`, along with the types that produce it which implement `VolatileMemory`, allows
//! us to sidestep those rules by wrapping pointers that absolutely have to be accessed volatile.
//! Some systems really do need to operate on shared memory and can't have the compiler reordering
//! or eliding accesses because it has no visibility into what other systems are doing with that
//! hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
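//!
//! A short usage sketch of the access pattern described above:
//!
//! ```
//! use base::VolatileSlice;
//!
//! let mut mem = [0u8; 4];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // All access goes through volatile reads and writes, never through `&`/`&mut` references.
//! vslice.write_bytes(0xff);
//! assert!(!vslice.is_all_zero());
//! ```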

use std::cmp::min;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::read_volatile;
use std::ptr::write_bytes;
use std::ptr::write_volatile;
use std::result;
use std::slice;

use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::Ref;

use crate::IoBufMut;

#[sorted]
#[derive(Error, Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    #[error("address 0x{addr:x} is out of bounds")]
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
    Overflow { base: usize, offset: usize },
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
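    ///
    /// # Examples
    ///
    /// A minimal sketch using the `VolatileSlice` implementation from this module:
    ///
    /// ```
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.get_slice(4, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// ```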
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
}

/// A slice of raw memory that supports volatile access. Like `std::io::IoSliceMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoSliceMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len`
    /// bytes and should live for the entire duration of lifetime `'a`.
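    ///
    /// # Examples
    ///
    /// A minimal sketch of upholding that contract with a pointer derived from a local buffer:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 16];
    /// // SAFETY: `mem` is valid for reads and writes of 16 bytes and outlives the slice.
    /// let vslice = unsafe { VolatileSlice::from_raw_parts(mem.as_mut_ptr(), mem.len()) };
    /// assert_eq!(vslice.size(), 16);
    /// ```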
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Advance the starting position of this slice.
    ///
    /// Panics if `count > self.size()`.
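    ///
    /// # Examples
    ///
    /// A minimal sketch of advancing past the first few bytes:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 8];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.advance(3);
    /// assert_eq!(vslice.size(), 5);
    /// ```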
    pub fn advance(&mut self, count: usize) {
        self.0.advance(count)
    }

    /// Shorten the length of the slice.
    ///
    /// Has no effect if `len > self.size()`.
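    ///
    /// # Examples
    ///
    /// A minimal sketch showing both the shortening and the no-op case:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 8];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.truncate(100); // Longer than the slice, so no effect.
    /// assert_eq!(vslice.size(), 8);
    /// vslice.truncate(2);
    /// assert_eq!(vslice.size(), 2);
    /// ```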
    pub fn truncate(&mut self, len: usize) {
        self.0.truncate(len)
    }

    /// Returns this `VolatileSlice` as an `IoBufMut`.
    pub fn as_iobuf(&self) -> &IoBufMut {
        &self.0
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `IoBufMut`s
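    ///
    /// # Examples
    ///
    /// A minimal sketch converting two slices at once:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut a = [0u8; 4];
    /// let mut b = [0u8; 8];
    /// let vslices = [VolatileSlice::new(&mut a[..]), VolatileSlice::new(&mut b[..])];
    /// let iobufs = VolatileSlice::as_iobufs(&vslices);
    /// assert_eq!(iobufs.len(), 2);
    /// ```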
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'mem, 'slice>(
        iovs: &'slice [VolatileSlice<'mem>],
    ) -> &'slice [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
    }

    /// Converts a mutable slice of `VolatileSlice`s into a mutable slice of `IoBufMut`s
    #[inline]
    pub fn as_iobufs_mut<'mem, 'slice>(
        iovs: &'slice mut [VolatileSlice<'mem>],
    ) -> &'slice mut [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBufMut, iovs.len()) }
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
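    ///
    /// # Examples
    ///
    /// A minimal sketch of skipping the first half of a slice:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let second_half = vslice.offset(16).unwrap();
    /// assert_eq!(second_half.size(), 16);
    /// ```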
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // SAFETY:
        // Safe because the memory has the same lifetime and points to a subset of the memory of
        // the original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
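    ///
    /// # Examples
    ///
    /// A minimal sketch of taking a bounded sub-slice and hitting the out-of-bounds check:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(8, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// assert!(vslice.sub_slice(24, 16).is_err());
    /// ```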
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // SAFETY:
        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // SAFETY:
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile
    /// reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: FromBytes + AsBytes + Copy,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is initialized.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // SAFETY: Safe because slice is valid and is byte aligned.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile
    /// writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// let mut copy_buf = [0u32; 4];
    /// vslice.copy_to(&mut copy_buf);
    /// for i in 0..4 {
    ///     assert_eq!(copy_buf[i], 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: FromBytes + AsBytes,
    {
        let mut addr = self.as_mut_ptr();
        for v in buf.iter().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is mutable.
            unsafe {
                write_volatile(
                    addr as *mut T,
                    Ref::<_, T>::new(v.as_bytes()).unwrap().read(),
                );
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Returns whether all bytes in this slice are zero or not.
    ///
    /// This is optimized for a [VolatileSlice] aligned to 16 bytes.
    ///
    /// TODO(b/274840085): Use SIMD for better performance.
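    ///
    /// # Examples
    ///
    /// A minimal sketch of the zero check:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert!(vslice.is_all_zero());
    /// vslice.write_bytes(1);
    /// assert!(!vslice.is_all_zero());
    /// ```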
    pub fn is_all_zero(&self) -> bool {
        const MASK_4BIT: usize = 0x0f;
        let head_addr = self.as_ptr() as usize;
        // Round up to a multiple of 16.
        let aligned_head_addr = (head_addr + MASK_4BIT) & !MASK_4BIT;
        let tail_addr = head_addr + self.size();
        // Round down to a multiple of 16.
        let aligned_tail_addr = tail_addr & !MASK_4BIT;

        // Check 16 bytes at once. The addresses should be 16-byte aligned for better performance.
        if (aligned_head_addr..aligned_tail_addr).step_by(16).any(
            |aligned_addr|
            // SAFETY: Each aligned_addr is within the VolatileSlice.
            unsafe { *(aligned_addr as *const u128) } != 0,
        ) {
            return false;
        }

        if head_addr == aligned_head_addr && tail_addr == aligned_tail_addr {
            // If head_addr and tail_addr are aligned, we can skip the unaligned part, which
            // contains at least 2 conditional branches.
            true
        } else {
            // Check the unaligned parts.
            // SAFETY: The ranges [head_addr, aligned_head_addr) and [aligned_tail_addr, tail_addr)
            // are within the VolatileSlice.
            unsafe {
                is_all_zero_naive(head_addr, aligned_head_addr)
                    && is_all_zero_naive(aligned_tail_addr, tail_addr)
            }
        }
    }
}

/// Check whether every byte is zero.
///
/// This checks byte by byte.
///
/// # Safety
///
/// * `head_addr` <= `tail_addr`
/// * Bytes between `head_addr` and `tail_addr` are valid to access.
unsafe fn is_all_zero_naive(head_addr: usize, tail_addr: usize) -> bool {
    (head_addr..tail_addr).all(|addr| *(addr as *const u8) == 0)
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

impl PartialEq<VolatileSlice<'_>> for VolatileSlice<'_> {
    fn eq(&self, other: &VolatileSlice) -> bool {
        let size = self.size();
        if size != other.size() {
            return false;
        }

        // SAFETY: We pass pointers into valid VolatileSlice regions, and size is checked above.
        let cmp = unsafe { libc::memcmp(self.as_ptr() as _, other.as_ptr() as _, size) };

        cmp == 0
    }
}

/// The `PartialEq` implementation for `VolatileSlice` is reflexive, symmetric, and transitive.
impl Eq for VolatileSlice<'_> {}

impl std::io::Write for VolatileSlice<'_> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let len = buf.len().min(self.size());
        self.copy_from(&buf[..len]);
        self.advance(len);
        Ok(len)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread::spawn;

    use super::*;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            VecMem {
                mem: Arc::new(vec![0u8; size]),
            }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = offset
                .checked_add(count)
                .ok_or(VolatileMemoryError::Overflow {
                    base: offset,
                    offset: count,
                })?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            Ok(
                // SAFETY: trivially safe
                unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) },
            )
        }
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        a.get_slice(0, 1).unwrap().write_bytes(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            a_clone.get_slice(0, 1).unwrap().write_bytes(0);
            thread_end_barrier.wait();
        });

        let mut byte = [0u8; 1];
        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 99);

        start_barrier.wait();
        end_barrier.wait();

        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        let a = VecMem::new(1);
        let res = a.get_slice(usize::MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: usize::MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn is_all_zero_16bytes_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1024).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_head_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(0);
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_tail_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(1023, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(0);
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_no_aligned_16bytes() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 16).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        for i in 1..17 {
            a.get_slice(i, 1).unwrap().write_bytes(1);
            assert!(!slice.is_all_zero());
            a.get_slice(i, 1).unwrap().write_bytes(0);
        }
        a.get_slice(17, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
    }

    #[test]
    fn write_partial() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 16).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and advance the slice by 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 16 - 4);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[5], 0xCC);
    }

    #[test]
    fn write_multiple() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 16).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and advance the slice by 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 16 - 4);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[5], 0xCC);

        // Writing another 4 bytes should succeed and advance the slice again.
        let write2_len = slice.write(&[5, 6, 7, 8]).unwrap();
        assert_eq!(write2_len, 4);
        assert_eq!(slice.size(), 16 - 4 - 4);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=8], [1, 2, 3, 4, 5, 6, 7, 8]);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[9], 0xCC);
    }

    #[test]
    fn write_exact_slice_size() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 4).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and consume the entire slice.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 0);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The byte after the slice should be unmodified.
        assert_eq!(mem.mem[5], 0);
    }

    #[test]
    fn write_more_than_slice_size() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 4).unwrap();
        slice.write_bytes(0xCC);

        // Attempting to write 5 bytes should succeed but only write 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4, 5]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 0);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The byte after the slice should be unmodified.
        assert_eq!(mem.mem[5], 0);
    }

    #[test]
    fn write_empty_slice() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 0).unwrap();

        // Writing to an empty slice should always return 0.
        assert_eq!(slice.write(&[1, 2, 3, 4]).unwrap(), 0);
        assert_eq!(slice.write(&[5, 6, 7, 8]).unwrap(), 0);
        assert_eq!(slice.write(&[]).unwrap(), 0);
    }
}