1 // Copyright 2023 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::cmp;
6 use std::io::Read;
7 use std::io::Write;
8
9 use base::warn;
10 use data_model::Be16;
11 use data_model::Be32;
12 use data_model::Be64;
13 use zerocopy::AsBytes;
14 use zerocopy::FromBytes;
15 use zerocopy::FromZeroes;
16
17 use crate::virtio::scsi::constants::INQUIRY;
18 use crate::virtio::scsi::constants::MAINTENANCE_IN;
19 use crate::virtio::scsi::constants::MODE_SELECT_6;
20 use crate::virtio::scsi::constants::MODE_SENSE_6;
21 use crate::virtio::scsi::constants::READ_10;
22 use crate::virtio::scsi::constants::READ_6;
23 use crate::virtio::scsi::constants::READ_CAPACITY_10;
24 use crate::virtio::scsi::constants::READ_CAPACITY_16;
25 use crate::virtio::scsi::constants::REPORT_LUNS;
26 use crate::virtio::scsi::constants::REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS;
27 use crate::virtio::scsi::constants::SERVICE_ACTION_IN_16;
28 use crate::virtio::scsi::constants::SYNCHRONIZE_CACHE_10;
29 use crate::virtio::scsi::constants::TEST_UNIT_READY;
30 use crate::virtio::scsi::constants::TYPE_DISK;
31 use crate::virtio::scsi::constants::UNMAP;
32 use crate::virtio::scsi::constants::WRITE_10;
33 use crate::virtio::scsi::constants::WRITE_SAME_10;
34 use crate::virtio::scsi::constants::WRITE_SAME_16;
35 use crate::virtio::scsi::device::AsyncLogicalUnit;
36 use crate::virtio::scsi::device::ExecuteError;
37 use crate::virtio::Reader;
38 use crate::virtio::Writer;
39
/// A parsed SCSI command, with one variant per CDB opcode that crosvm emulates.
#[derive(Debug, PartialEq, Eq)]
pub enum Command {
    TestUnitReady(TestUnitReady),
    Read6(Read6),
    Inquiry(Inquiry),
    ModeSelect6(ModeSelect6),
    ModeSense6(ModeSense6),
    ReadCapacity10(ReadCapacity10),
    ReadCapacity16(ReadCapacity16),
    Read10(Read10),
    Write10(Write10),
    SynchronizeCache10(SynchronizeCache10),
    WriteSame10(WriteSame10),
    Unmap(Unmap),
    WriteSame16(WriteSame16),
    ReportLuns(ReportLuns),
    ReportSupportedTMFs(ReportSupportedTMFs),
}
58
59 impl Command {
new(cdb: &[u8]) -> Result<Self, ExecuteError>60 pub fn new(cdb: &[u8]) -> Result<Self, ExecuteError> {
61 let op = cdb[0];
62 match op {
63 TEST_UNIT_READY => Ok(Self::TestUnitReady(Self::parse_command(cdb)?)),
64 READ_6 => Ok(Self::Read6(Self::parse_command(cdb)?)),
65 INQUIRY => Ok(Self::Inquiry(Self::parse_command(cdb)?)),
66 MODE_SELECT_6 => Ok(Self::ModeSelect6(Self::parse_command(cdb)?)),
67 MODE_SENSE_6 => Ok(Self::ModeSense6(Self::parse_command(cdb)?)),
68 READ_CAPACITY_10 => Ok(Self::ReadCapacity10(Self::parse_command(cdb)?)),
69 READ_10 => Ok(Self::Read10(Self::parse_command(cdb)?)),
70 WRITE_10 => Ok(Self::Write10(Self::parse_command(cdb)?)),
71 SYNCHRONIZE_CACHE_10 => Ok(Self::SynchronizeCache10(Self::parse_command(cdb)?)),
72 WRITE_SAME_10 => Ok(Self::WriteSame10(Self::parse_command(cdb)?)),
73 UNMAP => Ok(Self::Unmap(Self::parse_command(cdb)?)),
74 WRITE_SAME_16 => Ok(Self::WriteSame16(Self::parse_command(cdb)?)),
75 SERVICE_ACTION_IN_16 => Self::parse_service_action_in_16(cdb),
76 REPORT_LUNS => Ok(Self::ReportLuns(Self::parse_command(cdb)?)),
77 MAINTENANCE_IN => Self::parse_maintenance_in(cdb),
78 _ => {
79 warn!("SCSI command {:#x?} is not implemented", op);
80 Err(ExecuteError::Unsupported(op))
81 }
82 }
83 }
84
parse_command<T: FromBytes>(cdb: &[u8]) -> Result<T, ExecuteError>85 fn parse_command<T: FromBytes>(cdb: &[u8]) -> Result<T, ExecuteError> {
86 let size = std::mem::size_of::<T>();
87 T::read_from(&cdb[..size]).ok_or(ExecuteError::ReadCommand)
88 }
89
parse_maintenance_in(cdb: &[u8]) -> Result<Self, ExecuteError>90 fn parse_maintenance_in(cdb: &[u8]) -> Result<Self, ExecuteError> {
91 const MAINTENANCE_IN_SIZE: usize = 12;
92 // Top three bits are reserved.
93 let service_action = cdb[1] & 0x1f;
94 match service_action {
95 REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS => {
96 let r = ReportSupportedTMFs::read_from(&cdb[..MAINTENANCE_IN_SIZE])
97 .ok_or(ExecuteError::ReadCommand)?;
98 Ok(Self::ReportSupportedTMFs(r))
99 }
100 _ => {
101 warn!(
102 "service action {:#x?} for MAINTENANCE_IN is not implemented",
103 service_action
104 );
105 Err(ExecuteError::Unsupported(cdb[0]))
106 }
107 }
108 }
109
parse_service_action_in_16(cdb: &[u8]) -> Result<Self, ExecuteError>110 fn parse_service_action_in_16(cdb: &[u8]) -> Result<Self, ExecuteError> {
111 const SERVICE_ACTION_IN_16_SIZE: usize = 16;
112 // Top three bits are reserved.
113 let service_action = cdb[1] & 0x1f;
114 match service_action {
115 READ_CAPACITY_16 => {
116 let r = ReadCapacity16::read_from(&cdb[..SERVICE_ACTION_IN_16_SIZE])
117 .ok_or(ExecuteError::ReadCommand)?;
118 Ok(Self::ReadCapacity16(r))
119 }
120 _ => {
121 warn!(
122 "service action {:#x?} for SERVICE_ACTION_IN_16 is not implemented",
123 service_action
124 );
125 Err(ExecuteError::Unsupported(cdb[0]))
126 }
127 }
128 }
129
execute( &self, reader: &mut Reader, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>130 pub async fn execute(
131 &self,
132 reader: &mut Reader,
133 writer: &mut Writer,
134 dev: &AsyncLogicalUnit,
135 ) -> Result<(), ExecuteError> {
136 match self {
137 Self::TestUnitReady(_) => Ok(()), // noop as the device is ready.
138 Self::Read6(read6) => read6.emulate(writer, dev).await,
139 Self::Inquiry(inquiry) => inquiry.emulate(writer, dev),
140 Self::ModeSelect6(mode_select_6) => mode_select_6.emulate(reader, dev),
141 Self::ModeSense6(mode_sense_6) => mode_sense_6.emulate(writer, dev),
142 Self::ReadCapacity10(read_capacity_10) => read_capacity_10.emulate(writer, dev),
143 Self::ReadCapacity16(read_capacity_16) => read_capacity_16.emulate(writer, dev),
144 Self::Read10(read_10) => read_10.emulate(writer, dev).await,
145 Self::Write10(write_10) => write_10.emulate(reader, dev).await,
146 Self::SynchronizeCache10(synchronize_cache_10) => {
147 synchronize_cache_10.emulate(dev).await
148 }
149 Self::WriteSame10(write_same_10) => write_same_10.emulate(reader, dev).await,
150 Self::Unmap(unmap) => unmap.emulate(reader, dev).await,
151 Self::WriteSame16(write_same_16) => write_same_16.emulate(reader, dev).await,
152 Self::ReportLuns(report_luns) => report_luns.emulate(writer),
153 Self::ReportSupportedTMFs(report_supported_tmfs) => {
154 report_supported_tmfs.emulate(writer)
155 }
156 }
157 }
158 }
159
/// TEST UNIT READY command: the 6-byte CDB layout.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct TestUnitReady {
    opcode: u8,
    reserved: [u8; 4],
    control: u8,
}
167
check_lba_range(max_lba: u64, sector_num: u64, sector_len: usize) -> Result<(), ExecuteError>168 fn check_lba_range(max_lba: u64, sector_num: u64, sector_len: usize) -> Result<(), ExecuteError> {
169 // Checking `sector_num + sector_len - 1 <= max_lba`, but we are being careful about overflows
170 // and underflows.
171 match sector_num.checked_add(sector_len as u64) {
172 Some(v) if v <= max_lba + 1 => Ok(()),
173 _ => Err(ExecuteError::LbaOutOfRange {
174 length: sector_len,
175 sector: sector_num,
176 max_lba,
177 }),
178 }
179 }
180
read_from_disk( writer: &mut Writer, dev: &AsyncLogicalUnit, xfer_blocks: usize, lba: u64, ) -> Result<(), ExecuteError>181 async fn read_from_disk(
182 writer: &mut Writer,
183 dev: &AsyncLogicalUnit,
184 xfer_blocks: usize,
185 lba: u64,
186 ) -> Result<(), ExecuteError> {
187 check_lba_range(dev.max_lba, lba, xfer_blocks)?;
188 let block_size = dev.block_size;
189 let count = xfer_blocks * block_size as usize;
190 let offset = lba * block_size as u64;
191 let before = writer.bytes_written();
192 writer
193 .write_all_from_at_fut(&*dev.disk_image, count, offset)
194 .await
195 .map_err(|desc_error| {
196 let resid = count - (writer.bytes_written() - before);
197 ExecuteError::ReadIo { resid, desc_error }
198 })
199 }
200
201 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
202 #[repr(C, packed)]
203 pub struct Read6 {
204 opcode: u8,
205 lba_bytes: [u8; 3],
206 xfer_len_byte: u8,
207 control: u8,
208 }
209
210 impl Read6 {
lba(&self) -> u32211 fn lba(&self) -> u32 {
212 u32::from_be_bytes([
213 0,
214 // The top three bits are reserved.
215 self.lba_bytes[0] & 0x1f,
216 self.lba_bytes[1],
217 self.lba_bytes[2],
218 ])
219 }
220
xfer_len(&self) -> usize221 fn xfer_len(&self) -> usize {
222 // The transfer length set to 0 means 256 blocks should be read.
223 if self.xfer_len_byte == 0 {
224 256
225 } else {
226 self.xfer_len_byte as usize
227 }
228 }
229
emulate( &self, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>230 async fn emulate(
231 &self,
232 writer: &mut Writer,
233 dev: &AsyncLogicalUnit,
234 ) -> Result<(), ExecuteError> {
235 let xfer_len = self.xfer_len();
236 let lba = self.lba() as u64;
237 let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(6)", xfer_len, lba);
238 read_from_disk(writer, dev, xfer_len, lba).await
239 }
240 }
241
242 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
243 #[repr(C, packed)]
244 pub struct Inquiry {
245 opcode: u8,
246 vpd_field: u8,
247 page_code: u8,
248 alloc_len_bytes: [u8; 2],
249 control: u8,
250 }
251
252 impl Inquiry {
vital_product_data_enabled(&self) -> bool253 fn vital_product_data_enabled(&self) -> bool {
254 self.vpd_field & 0x1 != 0
255 }
256
alloc_len(&self) -> usize257 fn alloc_len(&self) -> usize {
258 u16::from_be_bytes(self.alloc_len_bytes) as usize
259 }
260
page_code(&self) -> u8261 fn page_code(&self) -> u8 {
262 self.page_code
263 }
264
emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>265 fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
266 let _trace = cros_tracing::trace_event!(VirtioScsi, "INQUIRY");
267 if self.vital_product_data_enabled() {
268 return self.emulate_vital_product_data_page(writer, dev);
269 }
270 // PAGE CODE should be 0 when vpd bit is 0.
271 if self.page_code() != 0 {
272 return Err(ExecuteError::InvalidField);
273 }
274 let alloc_len = self.alloc_len();
275 let mut outbuf = vec![0u8; cmp::max(writer.available_bytes(), alloc_len)];
276 // Peripheral
277 outbuf[0] = TYPE_DISK;
278 // Removable bit. We currently do not support removable SCSI devices.
279 outbuf[1] = 0x0;
280 // Version 0x5 indicates that the device complies to SPC-3.
281 outbuf[2] = 0x5;
282 // Hierarchical Support | Response Data Format
283 // Support hierarchical addressing mode to assign LUNs to logical units.
284 // Response Data Format should be 2.
285 outbuf[3] = 0x10 | 0x2;
286 // Additional Length
287 outbuf[4] = {
288 let buflen = outbuf.len().try_into().unwrap_or(u8::MAX);
289 // We will write at least 36 bytes and this is the 5th byte.
290 cmp::max(buflen, 36) - 5
291 };
292 // Cmdque: support full task management mode
293 outbuf[7] = 0x2;
294 // Vendor
295 Self::fill_left_aligned_ascii(&mut outbuf[8..16], "CROSVM");
296 // Product ID
297 Self::fill_left_aligned_ascii(&mut outbuf[16..32], "CROSVM HARDDISK");
298 // Product revision level
299 Self::fill_left_aligned_ascii(&mut outbuf[32..36], "0.1");
300
301 writer
302 .write_all(&outbuf[..alloc_len])
303 .map_err(ExecuteError::Write)
304 }
305
emulate_vital_product_data_page( &self, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>306 fn emulate_vital_product_data_page(
307 &self,
308 writer: &mut Writer,
309 dev: &AsyncLogicalUnit,
310 ) -> Result<(), ExecuteError> {
311 let alloc_len = self.alloc_len();
312 let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
313 // Peripheral
314 outbuf[0] = TYPE_DISK;
315 let page_code = self.page_code();
316 outbuf[1] = page_code;
317 match page_code {
318 // Supported VPD Pages
319 0x00 => {
320 // outbuf[2] byte is reserved.
321 // 0x00: Supported VPD Pages (this command)
322 // 0x83: Device Identification
323 // 0xb0: Block Limits
324 // 0xb2: Logical Block Provisioning
325 const SUPPORTED_VPD_PAGE_CODES: [u8; 4] = [0x00, 0x83, 0xb0, 0xb2];
326 let page_code_len: u8 = SUPPORTED_VPD_PAGE_CODES
327 .len()
328 .try_into()
329 .expect("The number of vpd page codes cannot exceed u8::MAX");
330 // Page legth
331 outbuf[3] = page_code_len;
332 outbuf[4..4 + page_code_len as usize].copy_from_slice(&SUPPORTED_VPD_PAGE_CODES);
333 }
334 // Device Identification
335 0x83 => {
336 const DEVICE_ID: &[u8] = b"CROSVM SCSI DEVICE";
337 let device_id_len: u8 = DEVICE_ID
338 .len()
339 .try_into()
340 .expect("device id should be shorter");
341 // Page length: An identification descriptor will be 4 bytes followed by an id.
342 outbuf[2..4].copy_from_slice(&(4 + device_id_len as u16).to_be_bytes());
343 // ASCII
344 outbuf[4] = 0x2;
345 // ASSOCIATION | IDENTIFICATION_TYPE_FIELD
346 // ASSOCIATION: device_id is associated with the addressed logical unit.
347 // IDENTIFICATION_TYPE_FIELD: vendor specific
348 // outbuf[5] = 0x0 | 0x0;
349 // outbuf[6] byte is reserved.
350 outbuf[7] = device_id_len;
351 outbuf[8..8 + device_id_len as usize].copy_from_slice(DEVICE_ID);
352 }
353 // Block Limits
354 0xb0 => {
355 // Page length
356 outbuf[3] = 0x3c;
357 // We do not support a value of zero in the NUMBER OF LOGICAL BLOCKS field in the
358 // WRITE SAME command CDBs.
359 outbuf[4] = 1;
360 // skip outbuf[5]: crosvm does not support the COMPARE AND WRITE command.
361 // Maximum transfer length
362 outbuf[8..12]
363 .copy_from_slice(&dev.max_lba.try_into().unwrap_or(u32::MAX).to_be_bytes());
364 // Maximum unmap LBA count
365 outbuf[20..24].fill(0xff);
366 // Maximum unmap block descriptor count
367 outbuf[24..28].fill(0xff);
368 // Optimal unmap granularity
369 outbuf[28..32].copy_from_slice(&128u32.to_be_bytes());
370 // Maximum WRITE SAME length
371 outbuf[36..44].copy_from_slice(&dev.max_lba.to_be_bytes());
372 }
373 // Logical Block Provisioning
374 0xb2 => {
375 // Page length
376 outbuf[3] = 4;
377 // skip outbuf[4]: crosvm does not support logical block provisioning threshold
378 // sets.
379 const UNMAP: u8 = 1 << 7;
380 const WRITE_SAME_16: u8 = 1 << 6;
381 const WRITE_SAME_10: u8 = 1 << 5;
382 outbuf[5] = UNMAP | WRITE_SAME_10 | WRITE_SAME_16;
383 // The logical unit is thin-provisioned.
384 outbuf[6] = 0x02;
385 // skip outbuf[7]: The logical block data represented by unmapped LBAs is vendor
386 // specific
387 }
388 _ => {
389 warn!("unsupported vpd page code: {:#x?}", page_code);
390 return Err(ExecuteError::InvalidField);
391 }
392 };
393 writer
394 .write_all(&outbuf[..alloc_len])
395 .map_err(ExecuteError::Write)
396 }
397
fill_left_aligned_ascii(buf: &mut [u8], s: &str)398 fn fill_left_aligned_ascii(buf: &mut [u8], s: &str) {
399 debug_assert!(s.len() < buf.len());
400 buf[..s.len()].copy_from_slice(s.as_bytes());
401 buf[s.len()..].fill(b' ');
402 }
403 }
404
405 // Fill in the information of the page code and return the number of bytes written to the buffer.
fill_mode_page( page_code: u8, subpage_code: u8, page_control: PageControl, outbuf: &mut [u8], ) -> Option<u8>406 fn fill_mode_page(
407 page_code: u8,
408 subpage_code: u8,
409 page_control: PageControl,
410 outbuf: &mut [u8],
411 ) -> Option<u8> {
412 // outbuf[0]: page code
413 // outbuf[1]: page length
414 match (page_code, subpage_code) {
415 // Vendor specific.
416 (0x00, 0x00) => None,
417 // Read-Write error recovery mode page
418 (0x01, 0x00) => {
419 const LEN: u8 = 10;
420 outbuf[0] = page_code;
421 outbuf[1] = LEN;
422 if page_control != PageControl::Changable {
423 // Automatic write reallocation enabled.
424 outbuf[3] = 0x80;
425 }
426 Some(LEN + 2)
427 }
428 // Caching.
429 (0x08, 0x00) => {
430 const LEN: u8 = 0x12;
431 outbuf[0] = page_code;
432 outbuf[1] = LEN;
433 if page_control != PageControl::Changable {
434 // Writeback cache enabled.
435 outbuf[2] = 0x04;
436 }
437 Some(LEN + 2)
438 }
439 _ => None,
440 }
441 }
442
// According to the spec, devices that implement MODE SENSE(6) shall also implement MODE SELECT(6)
// as well.
/// MODE SELECT(6) command: the 6-byte CDB layout.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct ModeSelect6 {
    opcode: u8,
    // PF (page format, bit 4) and SP (save pages, bit 0).
    pf_sp_field: u8,
    _reserved: [u8; 2],
    param_list_len: u8,
    control: u8,
}

impl ModeSelect6 {
    fn is_valid_pf_and_sp(&self) -> bool {
        // crosvm only support page format bit = 1 and saved pages bit = 0
        self.pf_sp_field & 0x11 == 0x10
    }

    /// Emulates MODE SELECT(6): validates the mode parameter header and block
    /// descriptor from the guest, then verifies each supplied mode page
    /// against the device's current values (no parameter may be changed).
    fn emulate(&self, reader: &mut Reader, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        // A short (8-byte) block descriptor within the mode parameter list.
        #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
        #[repr(C, packed)]
        struct BlockDescriptor {
            _density: u8,
            _number_of_blocks_field: [u8; 3],
            _reserved: u8,
            block_len_field: [u8; 3],
        }

        impl BlockDescriptor {
            // 24-bit big-endian block length.
            fn block_len(&self) -> u32 {
                u32::from_be_bytes([
                    0,
                    self.block_len_field[0],
                    self.block_len_field[1],
                    self.block_len_field[2],
                ])
            }
        }

        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SELECT(6)");
        if !self.is_valid_pf_and_sp() {
            return Err(ExecuteError::InvalidField);
        }
        // Values for the mode parameter header.
        let [_mode_data_len, medium_type, _dev_param, block_desc_len] =
            reader.read_obj::<[u8; 4]>().map_err(ExecuteError::Read)?;
        if medium_type != TYPE_DISK {
            return Err(ExecuteError::InvalidField);
        }
        match block_desc_len {
            0 => (),
            8 => {
                let block_desc = reader
                    .read_obj::<BlockDescriptor>()
                    .map_err(ExecuteError::Read)?;
                // crosvm currently does not support modifying the block size.
                if block_desc.block_len() != dev.block_size {
                    return Err(ExecuteError::InvalidField);
                }
            }
            // crosvm does not support 2 or more block descriptors, hence block_desc_len other than
            // 0 and 8 is considered invalid.
            _ => return Err(ExecuteError::InvalidField),
        };
        // Validate every mode page remaining in the parameter list.
        while reader.available_bytes() > 0 {
            Self::handle_mode_page(reader)?;
        }
        Ok(())
    }

    // Reads one mode page from the parameter list and accepts it only if it is
    // byte-identical to the device's current values.
    fn handle_mode_page(reader: &mut Reader) -> Result<(), ExecuteError> {
        // Header of a page_0-format mode page (no subpage code).
        #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
        #[repr(C, packed)]
        struct Page0Header {
            page_code: u8,
            page_len: u8,
        }

        // Header of a subpage-format mode page (SPF bit set).
        #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
        #[repr(C, packed)]
        struct SubpageHeader {
            page_code: u8,
            subpage_code: u8,
            page_len_field: [u8; 2],
        }

        // Bit 6 of the first byte is the SPF (subpage format) flag.
        let is_page0 = reader.peek_obj::<u8>().map_err(ExecuteError::Read)? & 0x40 == 0;
        let (page_code, subpage_code, page_len) = if is_page0 {
            let header = reader
                .read_obj::<Page0Header>()
                .map_err(ExecuteError::Read)?;
            (header.page_code, 0, header.page_len as u16)
        } else {
            let header = reader
                .read_obj::<SubpageHeader>()
                .map_err(ExecuteError::Read)?;
            (
                header.page_code,
                header.subpage_code,
                u16::from_be_bytes(header.page_len_field),
            )
        };
        // Render the device's current values for this page into `outbuf` ...
        let mut outbuf = vec![0; page_len as usize];
        fill_mode_page(page_code, subpage_code, PageControl::Current, &mut outbuf);
        // ... and require the guest-supplied page bytes to match them exactly.
        let mut input = vec![0; page_len as usize];
        reader.read_exact(&mut input).map_err(ExecuteError::Read)?;
        // crosvm does not allow any values to be changed.
        if input == outbuf {
            Ok(())
        } else {
            Err(ExecuteError::InvalidField)
        }
    }
}
557
/// MODE SENSE(6) command: the 6-byte CDB layout.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct ModeSense6 {
    opcode: u8,
    // Bit 3 is the DBD (disable block descriptors) flag.
    dbd_field: u8,
    // Bits 6-7: page control (PC); bits 0-5: page code.
    page_control_and_page_code: u8,
    subpage_code: u8,
    alloc_len: u8,
    control: u8,
}

/// The PC (page control) field of MODE SENSE: which flavor of
/// mode-parameter values to report.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum PageControl {
    Current,
    Default,
    Changable,
}
575
impl ModeSense6 {
    // ALLOCATION LENGTH: maximum number of response bytes the initiator accepts.
    fn alloc_len(&self) -> usize {
        self.alloc_len as usize
    }

    // DBD (disable block descriptors) bit.
    fn disable_block_desc(&self) -> bool {
        self.dbd_field & 0x8 != 0
    }

    fn page_code(&self) -> u8 {
        // The top two bits represents page control field, and the rest is page code.
        self.page_control_and_page_code & 0x3f
    }

    // Decodes the 2-bit page control field; saved values (3) are unsupported.
    fn page_control(&self) -> Result<PageControl, ExecuteError> {
        match self.page_control_and_page_code >> 6 {
            0 => Ok(PageControl::Current),
            1 => Ok(PageControl::Changable),
            2 => Ok(PageControl::Default),
            3 => Err(ExecuteError::SavingParamNotSupported),
            // Unreachable: a u8 shifted right by 6 is always in 0..=3.
            _ => Err(ExecuteError::InvalidField),
        }
    }

    fn subpage_code(&self) -> u8 {
        self.subpage_code
    }

    /// Emulates MODE SENSE(6): builds the mode parameter header, an optional
    /// short block descriptor, and the requested mode page(s).
    fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
        let _trace = cros_tracing::trace_event!(VirtioScsi, "MODE_SENSE(6)");
        let alloc_len = self.alloc_len();
        // 4096 bytes comfortably covers the header plus every supported page.
        let mut outbuf = vec![0u8; cmp::max(4096, alloc_len)];
        // outbuf[0]: Represents data length. Will be filled later.
        // outbuf[1]: Medium type should be 0.

        // Device specific parameter
        // We do not support the disabled page out (DPO) and forced unit access (FUA) bit.
        outbuf[2] = if dev.read_only { 0x80 } else { 0x00 };
        // `idx` tracks the next free byte in `outbuf`.
        let mut idx = if !self.disable_block_desc() && dev.max_lba > 0 {
            // Block descriptor length.
            outbuf[3] = 8;
            // outbuf[4]: Density code is 0.
            let sectors = dev.max_lba;
            // Fill in the number of sectors if not bigger than 0xffffff, leave it with 0
            // otherwise.
            if sectors <= 0xffffff {
                outbuf[5..8].copy_from_slice(&(sectors as u32).to_be_bytes()[1..]);
            }
            // outbuf[8]: reserved.
            outbuf[9..12].copy_from_slice(&dev.block_size.to_be_bytes()[1..]);
            12
        } else {
            4
        };

        let page_control = self.page_control()?;
        let page_code = self.page_code();
        let subpage_code = self.subpage_code();
        // The pair of the page code and the subpage code specifies which mode pages and subpages
        // to return. Refer to the Table 99 in the SPC-3 spec for more details:
        // <https://www.t10.org/cgi-bin/ac.pl?t=f&f=spc3r23.pdf>
        match (page_code, subpage_code) {
            // Return all mode pages with subpage 0.
            (0x3f, 0x00) => {
                Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
            }
            // Return all mode pages with subpages 0x00-0xfe.
            (0x3f, 0xff) => {
                for subpage_code in 0..0xff {
                    Self::add_all_page_codes(subpage_code, page_control, &mut outbuf, &mut idx)
                }
            }
            // subpage_code other than 0x00 or 0xff are reserved.
            (0x3f, _) => return Err(ExecuteError::InvalidField),
            // Return a specific mode page with subpages 0x00-0xfe.
            (_, 0xff) => {
                for subpage_code in 0..0xff {
                    match fill_mode_page(
                        page_code,
                        subpage_code,
                        page_control,
                        &mut outbuf[idx as usize..],
                    ) {
                        Some(n) => idx += n,
                        None => return Err(ExecuteError::InvalidField),
                    };
                }
            }
            (_, _) => {
                match fill_mode_page(
                    page_code,
                    subpage_code,
                    page_control,
                    &mut outbuf[idx as usize..],
                ) {
                    Some(n) => idx += n,
                    None => return Err(ExecuteError::InvalidField),
                };
            }
        };
        // Mode data length: bytes that follow the length byte itself.
        outbuf[0] = idx - 1;
        writer
            .write_all(&outbuf[..alloc_len])
            .map_err(ExecuteError::Write)
    }

    // Fill in mode pages with a specific subpage_code.
    fn add_all_page_codes(
        subpage_code: u8,
        page_control: PageControl,
        outbuf: &mut [u8],
        idx: &mut u8,
    ) {
        for page_code in 1..0x3f {
            if let Some(n) = fill_mode_page(
                page_code,
                subpage_code,
                page_control,
                &mut outbuf[*idx as usize..],
            ) {
                *idx += n;
            }
        }
        // Add mode page 0 after all other mode pages were returned.
        if let Some(n) = fill_mode_page(0, subpage_code, page_control, &mut outbuf[*idx as usize..])
        {
            *idx += n;
        }
    }
}
706
707 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
708 #[repr(C, packed)]
709 pub struct ReadCapacity10 {
710 opcode: u8,
711 _obsolete1: u8,
712 _obsolete2: [u8; 4],
713 _reserved: [u8; 2],
714 _obsolete3: u8,
715 control: u8,
716 }
717
718 impl ReadCapacity10 {
emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>719 fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
720 // Returned value is the block address of the last sector.
721 // If the block address exceeds u32::MAX, we return u32::MAX.
722 let block_address: u32 = dev.max_lba.saturating_sub(1).try_into().unwrap_or(u32::MAX);
723 let mut outbuf = [0u8; 8];
724 outbuf[..4].copy_from_slice(&block_address.to_be_bytes());
725 outbuf[4..8].copy_from_slice(&dev.block_size.to_be_bytes());
726 writer.write_all(&outbuf).map_err(ExecuteError::Write)
727 }
728 }
729
730 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
731 #[repr(C, packed)]
732 pub struct ReadCapacity16 {
733 opcode: u8,
734 service_action_field: u8,
735 _obsolete: [u8; 8],
736 alloc_len_bytes: [u8; 4],
737 _reserved: u8,
738 control: u8,
739 }
740
741 impl ReadCapacity16 {
emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>742 fn emulate(&self, writer: &mut Writer, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
743 let _trace = cros_tracing::trace_event!(VirtioScsi, "READ_CAPACITY(16)");
744 let mut outbuf = [0u8; 32];
745 // Last logical block address
746 outbuf[..8].copy_from_slice(&dev.max_lba.saturating_sub(1).to_be_bytes());
747 // Block size
748 outbuf[8..12].copy_from_slice(&dev.block_size.to_be_bytes());
749 // crosvm implements logical block provisioning management.
750 outbuf[14] = 1 << 7;
751 writer.write_all(&outbuf).map_err(ExecuteError::Write)
752 }
753 }
754
755 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
756 #[repr(C, packed)]
757 pub struct Read10 {
758 opcode: u8,
759 rdprotect: u8,
760 lba_bytes: [u8; 4],
761 group_number: u8,
762 xfer_len_bytes: [u8; 2],
763 control: u8,
764 }
765
766 impl Read10 {
xfer_len(&self) -> usize767 fn xfer_len(&self) -> usize {
768 u16::from_be_bytes(self.xfer_len_bytes) as usize
769 }
770
lba(&self) -> u64771 fn lba(&self) -> u64 {
772 u32::from_be_bytes(self.lba_bytes) as u64
773 }
774
emulate( &self, writer: &mut Writer, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>775 async fn emulate(
776 &self,
777 writer: &mut Writer,
778 dev: &AsyncLogicalUnit,
779 ) -> Result<(), ExecuteError> {
780 let xfer_len = self.xfer_len();
781 let lba = self.lba();
782 let _trace = cros_tracing::trace_event!(VirtioScsi, "READ(10)", lba, xfer_len);
783 read_from_disk(writer, dev, xfer_len, lba).await
784 }
785 }
786
787 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
788 #[repr(C, packed)]
789 pub struct Write10 {
790 opcode: u8,
791 wrprotect: u8,
792 lba_bytes: [u8; 4],
793 group_number: u8,
794 xfer_len_bytes: [u8; 2],
795 control: u8,
796 }
797
798 impl Write10 {
lba(&self) -> u64799 fn lba(&self) -> u64 {
800 u32::from_be_bytes(self.lba_bytes) as u64
801 }
802
xfer_len(&self) -> usize803 fn xfer_len(&self) -> usize {
804 u16::from_be_bytes(self.xfer_len_bytes) as usize
805 }
806
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>807 async fn emulate(
808 &self,
809 reader: &mut Reader,
810 dev: &AsyncLogicalUnit,
811 ) -> Result<(), ExecuteError> {
812 let xfer_len = self.xfer_len();
813 let lba = self.lba();
814 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE(10)", lba, xfer_len);
815 write_to_disk(reader, dev, xfer_len, lba).await
816 }
817 }
818
write_to_disk( reader: &mut Reader, dev: &AsyncLogicalUnit, xfer_blocks: usize, lba: u64, ) -> Result<(), ExecuteError>819 async fn write_to_disk(
820 reader: &mut Reader,
821 dev: &AsyncLogicalUnit,
822 xfer_blocks: usize,
823 lba: u64,
824 ) -> Result<(), ExecuteError> {
825 if dev.read_only {
826 return Err(ExecuteError::ReadOnly);
827 }
828 check_lba_range(dev.max_lba, lba, xfer_blocks)?;
829 let block_size = dev.block_size;
830 let count = xfer_blocks * block_size as usize;
831 let offset = lba * block_size as u64;
832 let before = reader.bytes_read();
833 reader
834 .read_exact_to_at_fut(&*dev.disk_image, count, offset)
835 .await
836 .map_err(|desc_error| {
837 let resid = count - (reader.bytes_read() - before);
838 ExecuteError::WriteIo { resid, desc_error }
839 })
840 }
841
842 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
843 #[repr(C, packed)]
844 pub struct SynchronizeCache10 {
845 opcode: u8,
846 immed_byte: u8,
847 lba_bytes: [u8; 4],
848 group_number: u8,
849 block_num_bytes: [u8; 2],
850 control: u8,
851 }
852
853 impl SynchronizeCache10 {
emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError>854 async fn emulate(&self, dev: &AsyncLogicalUnit) -> Result<(), ExecuteError> {
855 let _trace = cros_tracing::trace_event!(VirtioScsi, "SYNCHRONIZE_CACHE(10)");
856 if dev.read_only {
857 return Err(ExecuteError::ReadOnly);
858 }
859 dev.disk_image.fdatasync().await.map_err(|e| {
860 warn!("failed to sync: {e}");
861 ExecuteError::SynchronizationError
862 })
863 }
864 }
865
unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError>866 async fn unmap(dev: &AsyncLogicalUnit, lba: u64, nblocks: u64) -> Result<(), ExecuteError> {
867 check_lba_range(dev.max_lba, lba, nblocks as usize)?;
868 let offset = lba * dev.block_size as u64;
869 let length = nblocks * dev.block_size as u64;
870 // Ignore the errors here since the device is not strictly required to unmap the LBAs.
871 let _ = dev.disk_image.punch_hole(offset, length).await;
872 Ok(())
873 }
874
// Emulates WRITE SAME for a range of `nblocks` blocks starting at `lba`:
// accepted only when the transferred block is all zeroes (handled as a
// write-zeroes), otherwise rejected as an invalid field.
async fn write_same(
    dev: &AsyncLogicalUnit,
    lba: u64,
    nblocks: u64,
    reader: &mut Reader,
) -> Result<(), ExecuteError> {
    check_lba_range(dev.max_lba, lba, nblocks as usize)?;
    // The WRITE SAME command expects the device to transfer a single logical block from the
    // Data-Out buffer.
    // NOTE(review): this appears to limit `reader` to the first block so the
    // zero-check below inspects only that block — confirm against
    // `Reader::split_at`'s semantics.
    reader.split_at(dev.block_size as usize);
    if reader.get_remaining().iter().all(|s| s.is_all_zero()) {
        let block_size = dev.block_size as u64;
        // Ignore the errors here since the device is not strictly required to unmap the LBAs.
        let _ = dev
            .disk_image
            .write_zeroes_at(lba * block_size, nblocks * block_size)
            .await;
        Ok(())
    } else {
        // TODO(b/309376528): If the specified data is not zero, raise error for now.
        Err(ExecuteError::InvalidField)
    }
}
898
899 #[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
900 #[repr(C, packed)]
901 pub struct WriteSame10 {
902 opcode: u8,
903 wrprotect_anchor_unmap: u8,
904 lba_bytes: [u8; 4],
905 group_number_field: u8,
906 nblocks_bytes: [u8; 2],
907 control: u8,
908 }
909
910 impl WriteSame10 {
lba(&self) -> u32911 fn lba(&self) -> u32 {
912 u32::from_be_bytes(self.lba_bytes)
913 }
914
nblocks(&self) -> u16915 fn nblocks(&self) -> u16 {
916 u16::from_be_bytes(self.nblocks_bytes)
917 }
918
unmap(&self) -> bool919 fn unmap(&self) -> bool {
920 self.wrprotect_anchor_unmap & 0x8 != 0
921 }
922
anchor(&self) -> bool923 fn anchor(&self) -> bool {
924 self.wrprotect_anchor_unmap & 0x10 != 0
925 }
926
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>927 async fn emulate(
928 &self,
929 reader: &mut Reader,
930 dev: &AsyncLogicalUnit,
931 ) -> Result<(), ExecuteError> {
932 let lba = self.lba() as u64;
933 let nblocks = self.nblocks() as u64;
934 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(10)", lba, nblocks);
935 if dev.read_only {
936 return Err(ExecuteError::ReadOnly);
937 }
938 if nblocks == 0 {
939 // crosvm does not allow the number of blocks to be zero.
940 return Err(ExecuteError::InvalidField);
941 }
942 if self.anchor() {
943 // crosvm currently does not support anchor operations.
944 return Err(ExecuteError::InvalidField);
945 }
946 if self.unmap() {
947 unmap(dev, lba, nblocks).await
948 } else {
949 write_same(dev, lba, nblocks, reader).await
950 }
951 }
952 }
953
/// The UNMAP command descriptor block, as received on the wire.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct Unmap {
    // Operation code (UNMAP).
    opcode: u8,
    // ANCHOR bit (bit 0); remaining bits reserved.
    anchor_field: u8,
    _reserved: [u8; 4],
    // GROUP NUMBER field.
    group_number_field: u8,
    // PARAMETER LIST LENGTH in bytes, big-endian.
    param_list_len_bytes: [u8; 2],
    // CONTROL byte.
    control: u8,
}
964
965 impl Unmap {
anchor(&self) -> bool966 fn anchor(&self) -> bool {
967 self.anchor_field & 0x01 != 0
968 }
969
param_list_len(&self) -> u16970 fn param_list_len(&self) -> u16 {
971 u16::from_be_bytes(self.param_list_len_bytes)
972 }
973
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>974 async fn emulate(
975 &self,
976 reader: &mut Reader,
977 dev: &AsyncLogicalUnit,
978 ) -> Result<(), ExecuteError> {
979 let _trace = cros_tracing::trace_event!(VirtioScsi, "UNMAP");
980 // Reject anchor == 1
981 if self.anchor() {
982 return Err(ExecuteError::InvalidField);
983 }
984 if dev.read_only {
985 return Err(ExecuteError::ReadOnly);
986 }
987 let param_list_len = self.param_list_len();
988 if 0 < param_list_len && param_list_len < 8 {
989 return Err(ExecuteError::InvalidParamLen);
990 }
991 // unmap data len
992 reader.consume(2);
993 let unmap_block_descriptors = {
994 let block_data_len = reader
995 .read_obj::<Be16>()
996 .map_err(ExecuteError::Read)?
997 .to_native();
998 // If the data length is not a multiple of 16, the last unmap block should be ignored.
999 block_data_len / 16
1000 };
1001 // reserved
1002 reader.consume(4);
1003 for _ in 0..unmap_block_descriptors {
1004 let lba = reader
1005 .read_obj::<Be64>()
1006 .map_err(ExecuteError::Read)?
1007 .to_native();
1008 let nblocks = reader
1009 .read_obj::<Be32>()
1010 .map_err(ExecuteError::Read)?
1011 .to_native() as u64;
1012 // reserved
1013 reader.consume(4);
1014 unmap(dev, lba, nblocks).await?;
1015 }
1016 Ok(())
1017 }
1018 }
1019
/// The WRITE SAME(16) command descriptor block, as received on the wire.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct WriteSame16 {
    // Operation code (WRITE_SAME_16).
    opcode: u8,
    // WRPROTECT, ANCHOR (bit 4), and UNMAP (bit 3) fields.
    wrprotect_anchor_unmap: u8,
    // Starting logical block address, big-endian.
    lba_bytes: [u8; 8],
    // Number of logical blocks, big-endian.
    nblocks_bytes: [u8; 4],
    // GROUP NUMBER field.
    group_number_field: u8,
    // CONTROL byte.
    control: u8,
}
1030
1031 impl WriteSame16 {
lba(&self) -> u641032 fn lba(&self) -> u64 {
1033 u64::from_be_bytes(self.lba_bytes)
1034 }
1035
nblocks(&self) -> u321036 fn nblocks(&self) -> u32 {
1037 u32::from_be_bytes(self.nblocks_bytes)
1038 }
1039
unmap(&self) -> bool1040 fn unmap(&self) -> bool {
1041 self.wrprotect_anchor_unmap & 0x8 != 0
1042 }
1043
anchor(&self) -> bool1044 fn anchor(&self) -> bool {
1045 self.wrprotect_anchor_unmap & 0x10 != 0
1046 }
1047
emulate( &self, reader: &mut Reader, dev: &AsyncLogicalUnit, ) -> Result<(), ExecuteError>1048 async fn emulate(
1049 &self,
1050 reader: &mut Reader,
1051 dev: &AsyncLogicalUnit,
1052 ) -> Result<(), ExecuteError> {
1053 let lba = self.lba();
1054 let nblocks = self.nblocks() as u64;
1055 let _trace = cros_tracing::trace_event!(VirtioScsi, "WRITE_SAME(16)", lba, nblocks);
1056 if nblocks == 0 {
1057 // crosvm does not allow the number of blocks to be zero.
1058 return Err(ExecuteError::InvalidField);
1059 }
1060 if self.anchor() {
1061 // crosvm currently does not support anchor operations.
1062 return Err(ExecuteError::InvalidField);
1063 }
1064 if self.unmap() {
1065 unmap(dev, lba, nblocks).await
1066 } else {
1067 write_same(dev, lba, nblocks, reader).await
1068 }
1069 }
1070 }
1071
/// The REPORT LUNS command descriptor block, as received on the wire.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct ReportLuns {
    // Operation code (REPORT_LUNS).
    opcode: u8,
    _reserved: u8,
    // SELECT REPORT field.
    select_report: u8,
    _reserved2: [u8; 3],
    // ALLOCATION LENGTH in bytes, big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved3: u8,
    // CONTROL byte.
    control: u8,
}
1083
1084 impl ReportLuns {
alloc_len(&self) -> usize1085 fn alloc_len(&self) -> usize {
1086 u32::from_be_bytes(self.alloc_len_bytes) as usize
1087 }
1088
emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError>1089 fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1090 let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_LUNS");
1091 // We need at least 16 bytes.
1092 if self.alloc_len() < 16 {
1093 return Err(ExecuteError::InvalidField);
1094 }
1095 // Each LUN takes 8 bytes and we only support LUN0.
1096 let lun_list_len = 8u32;
1097 writer
1098 .write_all(&lun_list_len.to_be_bytes())
1099 .map_err(ExecuteError::Write)?;
1100 let reserved = [0; 4];
1101 writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1102 let lun0 = 0u64;
1103 writer
1104 .write_all(&lun0.to_be_bytes())
1105 .map_err(ExecuteError::Write)
1106 }
1107 }
1108
/// The REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS command descriptor block.
#[derive(Copy, Clone, Debug, Default, AsBytes, FromZeroes, FromBytes, PartialEq, Eq)]
#[repr(C, packed)]
pub struct ReportSupportedTMFs {
    // Operation code (MAINTENANCE_IN).
    opcode: u8,
    // SERVICE ACTION field.
    service_action_field: u8,
    _reserved1: [u8; 4],
    // ALLOCATION LENGTH in bytes, big-endian.
    alloc_len_bytes: [u8; 4],
    _reserved2: u8,
    // CONTROL byte.
    control: u8,
}
1119
1120 impl ReportSupportedTMFs {
alloc_len(&self) -> u321121 fn alloc_len(&self) -> u32 {
1122 u32::from_be_bytes(self.alloc_len_bytes)
1123 }
1124
emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError>1125 fn emulate(&self, writer: &mut Writer) -> Result<(), ExecuteError> {
1126 let _trace = cros_tracing::trace_event!(VirtioScsi, "REPORT_SUPPORTED_TMFs");
1127 // The allocation length should be at least four.
1128 if self.alloc_len() < 4 {
1129 return Err(ExecuteError::InvalidField);
1130 }
1131 // We support LOGICAL UNIT RESET and TARGET RESET.
1132 const LOGICAL_UNIT_RESET: u8 = 1 << 3;
1133 const TARGET_RESET: u8 = 1 << 1;
1134 writer
1135 .write_obj(LOGICAL_UNIT_RESET | TARGET_RESET)
1136 .map_err(ExecuteError::Write)?;
1137 // Push reserved bytes.
1138 let reserved = [0u8; 3];
1139 writer.write_all(&reserved).map_err(ExecuteError::Write)?;
1140 Ok(())
1141 }
1142 }
1143
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_test_unit_ready() {
        let expected = Command::TestUnitReady(TestUnitReady {
            opcode: TEST_UNIT_READY,
            reserved: [0; 4],
            control: 0,
        });
        let cdb = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        assert_eq!(Command::new(&cdb).unwrap(), expected);
    }

    #[test]
    fn parse_read6() {
        let command = Command::new(&[0x08, 0xab, 0xcd, 0xef, 0x00, 0x00]).unwrap();
        let read6 = if let Command::Read6(r) = command {
            r
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        // The top 3 bits of byte 1 are not part of the LBA.
        assert_eq!(read6.lba(), 0x0bcdef);
        // A transfer length field of zero decodes as 256 blocks.
        assert_eq!(read6.xfer_len(), 256);
    }

    #[test]
    fn parse_inquiry() {
        let command = Command::new(&[0x12, 0x01, 0x00, 0x00, 0x40, 0x00]).unwrap();
        let inquiry = if let Command::Inquiry(inq) = command {
            inq
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert!(inquiry.vital_product_data_enabled());
        assert_eq!(inquiry.page_code(), 0x00);
        assert_eq!(inquiry.alloc_len(), 0x0040);
    }

    #[test]
    fn parse_mode_sense_6() {
        let command = Command::new(&[0x1a, 0x00, 0xa8, 0x00, 0x04, 0x00]).unwrap();
        let mode_sense_6 = if let Command::ModeSense6(m) = command {
            m
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert_eq!(mode_sense_6.page_code(), 0x28);
        assert_eq!(mode_sense_6.page_control().unwrap(), PageControl::Default);
        assert_eq!(mode_sense_6.alloc_len(), 0x04);
    }

    #[test]
    fn parse_read_capacity_10() {
        let cdb = [0x25, 0x00, 0xab, 0xcd, 0xef, 0x01, 0x00, 0x00, 0x9, 0x0];
        match Command::new(&cdb).unwrap() {
            Command::ReadCapacity10(_) => (),
            cmd => panic!("unexpected command type: {:?}", cmd),
        }
    }

    #[test]
    fn parse_read10() {
        let command =
            Command::new(&[0x28, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00]).unwrap();
        let read10 = if let Command::Read10(r) = command {
            r
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert_eq!(read10.lba(), 0x003c0000);
        assert_eq!(read10.xfer_len(), 0x0008);
    }

    #[test]
    fn parse_write10() {
        let command =
            Command::new(&[0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00]).unwrap();
        let write10 = if let Command::Write10(w) = command {
            w
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert_eq!(write10.lba(), 0x00000000);
        assert_eq!(write10.xfer_len(), 0x0008);
    }

    #[test]
    fn parse_synchronize_cache_10() {
        let expected = Command::SynchronizeCache10(SynchronizeCache10 {
            opcode: SYNCHRONIZE_CACHE_10,
            immed_byte: 0,
            lba_bytes: [0x00, 0x00, 0x00, 0x00],
            group_number: 0x00,
            block_num_bytes: [0x00, 0x00],
            control: 0x00,
        });
        let cdb = [0x35, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00];
        assert_eq!(Command::new(&cdb).unwrap(), expected);
    }

    #[test]
    fn parse_report_luns() {
        let command = Command::new(&[
            0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ])
        .unwrap();
        let report_luns = if let Command::ReportLuns(r) = command {
            r
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert_eq!(report_luns.alloc_len(), 0xabcdef12);
    }

    #[test]
    fn parse_report_supported_tmfs() {
        let command = Command::new(&[
            0xa3, 0x0d, 0x00, 0x00, 0x00, 0x00, 0xab, 0xcd, 0xef, 0x12, 0x00, 0x00,
        ])
        .unwrap();
        let report_supported_tmfs = if let Command::ReportSupportedTMFs(r) = command {
            r
        } else {
            panic!("unexpected command type: {:?}", command)
        };
        assert_eq!(report_supported_tmfs.alloc_len(), 0xabcdef12);
    }
}
1277