1 use super::*;
2
3 use crate::test_gens::*;
4 use alloc::{vec, vec::Vec};
5 use proptest::prelude::*;
6
/// Combination of headers & payload used as the "expected" side of the
/// serialization / re-parsing round-trip tests below. Every layer is
/// optional so all packet compositions can be expressed.
#[derive(Clone, Debug, Eq, PartialEq)]
struct ComponentTest {
    /// Link layer header (e.g. Ethernet II) if present.
    link: Option<LinkHeader>,
    /// VLAN header(s) (single or double tagged) if present.
    vlan: Option<VlanHeader>,
    /// IPv4 or IPv6 header together with its extension headers if present.
    ip: Option<IpHeaders>,
    /// Transport layer header (UDP, TCP, ICMPv4 or ICMPv6) if present.
    transport: Option<TransportHeader>,
    /// Payload bytes written after all headers.
    payload: Vec<u8>,
}
15
16 static VLAN_ETHER_TYPES: &'static [EtherType] = &[
17 EtherType::VLAN_TAGGED_FRAME,
18 EtherType::PROVIDER_BRIDGING,
19 EtherType::VLAN_DOUBLE_TAGGED_FRAME,
20 ];
21
22 impl ComponentTest {
serialize(&self) -> Vec<u8>23 fn serialize(&self) -> Vec<u8> {
24 let mut buffer = Vec::<u8>::with_capacity(
25 match &self.link {
26 Some(header) => header.header_len(),
27 None => 0,
28 } + match &self.vlan {
29 Some(header) => header.header_len(),
30 None => 0,
31 } + match &self.ip {
32 Some(headers) => headers.header_len(),
33 None => 0,
34 } + match &self.transport {
35 Some(header) => header.header_len(),
36 None => 0,
37 } + self.payload.len(),
38 );
39
40 //fill all the elements
41 match &self.link {
42 Some(header) => header.write(&mut buffer).unwrap(),
43 None => {}
44 }
45 use crate::VlanHeader::*;
46 match &self.vlan {
47 Some(Single(header)) => header.write(&mut buffer).unwrap(),
48 Some(Double(header)) => header.write(&mut buffer).unwrap(),
49 None => {}
50 }
51 match &self.ip {
52 Some(IpHeaders::Ipv4(header, exts)) => {
53 header.write_raw(&mut buffer).unwrap();
54 exts.write(&mut buffer, header.protocol).unwrap();
55 }
56 Some(IpHeaders::Ipv6(header, exts)) => {
57 header.write(&mut buffer).unwrap();
58 exts.write(&mut buffer, header.next_header).unwrap();
59 }
60 None => {}
61 }
62 match &self.transport {
63 Some(TransportHeader::Icmpv6(header)) => header.write(&mut buffer).unwrap(),
64 Some(TransportHeader::Icmpv4(header)) => header.write(&mut buffer).unwrap(),
65 Some(TransportHeader::Udp(header)) => header.write(&mut buffer).unwrap(),
66 Some(TransportHeader::Tcp(header)) => header.write(&mut buffer).unwrap(),
67 None => {}
68 }
69 use std::io::Write;
70 buffer.write(&self.payload[..]).unwrap();
71 buffer
72 }
73
    /// Serialize the headers & payload specified in the headers and check that
    /// the different decoding & slicing methods for entire packets work correctly.
    ///
    /// The following functions will be checked if they work correctly:
    /// * `SlicedPacket::from_ethernet`
    /// * `SlicedPacket::from_ip`
    /// * `PacketHeaders::from_ethernet_slice`
    /// * `PacketHeaders::from_ip_slice`
    fn run(&self) {
        // clone the test so the length fields can be adapted
        let mut test = self.clone();

        // set the payload length in the ip header
        // (extension headers + transport header + payload)
        if let Some(ip) = test.ip.as_mut() {
            match ip {
                IpHeaders::Ipv4(ipv4, exts) => {
                    ipv4.set_payload_len(
                        exts.header_len()
                            + self.transport.as_ref().map_or(0, |t| t.header_len())
                            + self.payload.len(),
                    )
                    .unwrap();
                }
                IpHeaders::Ipv6(ipv6, exts) => {
                    ipv6.set_payload_length(
                        exts.header_len()
                            + self.transport.as_ref().map_or(0, |t| t.header_len())
                            + self.payload.len(),
                    )
                    .unwrap();
                }
            }
        }
        // the udp length field includes the udp header itself
        if let Some(TransportHeader::Udp(udp)) = test.transport.as_mut() {
            udp.length = udp.header_len_u16() + self.payload.len() as u16;
        }

        //packet with ethernet2 & vlan headers
        {
            //serialize to buffer
            let buffer = test.serialize();

            // PacketHeaders::from_ethernet_slice
            test.assert_headers(PacketHeaders::from_ethernet_slice(&buffer).unwrap());

            // SlicedPacket::from_ethernet
            test.assert_sliced_packet(SlicedPacket::from_ethernet(&buffer).unwrap());

            // create unexpected end of slice errors for the different headers
            // (each length in invalid_ser_lengths cuts one header short by one byte)
            for len in test.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ethernet_slice(&buffer[..len]).is_err());
                    assert!(SlicedPacket::from_ethernet(&buffer[..len]).is_err());
                }
            }
        }

        // packet data starting right after the link layer (tests from_ether_type functions)
        {
            // remove the link layer
            let ether_down = {
                let mut ether_down = test.clone();
                ether_down.link = None;
                ether_down
            };

            // serialize to buffer
            let buffer = ether_down.serialize();

            // PacketHeaders::from_ether_type
            // (the ether type is taken from the removed link header;
            // `run` from-ether-type checks are only invoked with an
            // ethernet2 link header present)
            ether_down.assert_headers(
                PacketHeaders::from_ether_type(
                    test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                    &buffer[..],
                )
                .unwrap(),
            );

            // SlicedPacket::from_ether_type
            ether_down.assert_sliced_packet(
                SlicedPacket::from_ether_type(
                    test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                    &buffer[..],
                )
                .unwrap(),
            );

            // create unexpected end of slice errors for the different headers
            for len in ether_down.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ether_type(
                        test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                        &buffer[..len]
                    )
                    .is_err());
                    assert!(SlicedPacket::from_ether_type(
                        test.link.clone().unwrap().ethernet2().unwrap().ether_type,
                        &buffer[..len]
                    )
                    .is_err());
                }
            }
        }

        // packet from the internet layer down (without ethernet2 & vlan headers)
        if test.ip.is_some() {
            // serialize from the ip layer downwards
            let ip_down = {
                let mut ip_down = test.clone();
                ip_down.link = None;
                ip_down.vlan = None;
                ip_down
            };

            // serialize to buffer
            let buffer = ip_down.serialize();

            // PacketHeaders::from_ip_slice
            ip_down.assert_headers(PacketHeaders::from_ip_slice(&buffer).unwrap());

            // SlicedPacket::from_ip
            ip_down.assert_sliced_packet(SlicedPacket::from_ip(&buffer).unwrap());

            // create unexpected end of slice errors for the different headers
            for len in ip_down.invalid_ser_lengths() {
                if let Some(len) = len {
                    assert!(PacketHeaders::from_ip_slice(&buffer[..len]).is_err());
                    assert!(SlicedPacket::from_ip(&buffer[..len]).is_err());
                }
            }
        }
    }
206
207 /// Creates slice lengths at which an too short slice error
208 /// should be triggered.
invalid_ser_lengths(&self) -> [Option<usize>; 12]209 fn invalid_ser_lengths(&self) -> [Option<usize>; 12] {
210 struct Builder {
211 result: [Option<usize>; 12],
212 next_index: usize,
213 offset: usize,
214 }
215
216 impl Builder {
add(&mut self, header_len: usize)217 fn add(&mut self, header_len: usize) {
218 self.offset += header_len;
219 self.result[self.next_index] = Some(self.offset - 1);
220 self.next_index += 1;
221 }
222 }
223
224 let mut builder = Builder {
225 result: [None; 12],
226 next_index: 0,
227 offset: 0,
228 };
229
230 if let Some(link) = self.link.as_ref() {
231 builder.add(link.header_len());
232 }
233 if let Some(vlan) = self.vlan.as_ref() {
234 use VlanHeader::*;
235 match vlan {
236 Single(single) => builder.add(single.header_len()),
237 Double(double) => {
238 builder.add(double.outer.header_len());
239 builder.add(double.inner.header_len());
240 }
241 }
242 }
243 if let Some(ip) = self.ip.as_ref() {
244 use IpHeaders::*;
245 match ip {
246 Ipv4(header, exts) => {
247 builder.add(header.header_len());
248 if let Some(auth) = exts.auth.as_ref() {
249 builder.add(auth.header_len());
250 }
251 }
252 Ipv6(header, exts) => {
253 builder.add(header.header_len());
254 if let Some(e) = exts.hop_by_hop_options.as_ref() {
255 builder.add(e.header_len());
256 }
257 if let Some(e) = exts.destination_options.as_ref() {
258 builder.add(e.header_len());
259 }
260 if let Some(routing) = exts.routing.as_ref() {
261 builder.add(routing.routing.header_len());
262 if let Some(e) = routing.final_destination_options.as_ref() {
263 builder.add(e.header_len());
264 }
265 }
266 if let Some(e) = exts.fragment.as_ref() {
267 builder.add(e.header_len());
268 }
269 if let Some(e) = exts.auth.as_ref() {
270 builder.add(e.header_len());
271 }
272 }
273 }
274 }
275 if let Some(transport) = self.transport.as_ref() {
276 builder.add(transport.header_len());
277 }
278
279 builder.result
280 }
281
assert_headers(&self, actual: PacketHeaders)282 fn assert_headers(&self, actual: PacketHeaders) {
283 assert_eq!(self.link, actual.link);
284 assert_eq!(self.vlan, actual.vlan);
285 assert_eq!(self.ip, self.ip);
286 assert_eq!(self.transport, actual.transport);
287 assert_eq!(self.payload[..], actual.payload.slice()[..]);
288 }
289
    /// Checks that the given `SlicedPacket` slicing result matches the
    /// headers & payload of this test.
    fn assert_sliced_packet(&self, result: SlicedPacket) {
        //assert identity to touch the derives (code coverage hack)
        assert_eq!(result, result);

        //ethernet & vlan
        // (payload-only link slices carry no header and map to None)
        assert_eq!(
            self.link,
            match result.link.as_ref() {
                Some(l) => match l {
                    LinkSlice::Ethernet2(e) => Some(LinkHeader::Ethernet2(e.to_header())),
                    LinkSlice::LinuxSll(e) => Some(LinkHeader::LinuxSll(e.to_header())),
                    LinkSlice::EtherPayload(_) => None,
                    LinkSlice::LinuxSllPayload(_) => None,
                },
                None => None,
            }
        );
        assert_eq!(self.vlan, result.vlan.as_ref().map(|ref x| x.to_header()));

        //ip
        // the sliced extensions are converted back into header structs so
        // they can be compared against the expected `IpHeaders`
        assert_eq!(self.ip, {
            use crate::NetSlice::*;
            match result.net.as_ref() {
                Some(Ipv4(actual)) => Some(IpHeaders::Ipv4(
                    actual.header().to_header(),
                    Ipv4Extensions {
                        auth: actual.extensions().auth.map(|ref x| x.to_header()),
                    },
                )),
                Some(Ipv6(actual)) => Some(IpHeaders::Ipv6(
                    actual.header().to_header(),
                    // re-decode the ipv6 extensions from the raw slice
                    Ipv6Extensions::from_slice(
                        actual.header().next_header(),
                        actual.extensions().slice(),
                    )
                    .unwrap()
                    .0,
                )),
                None => None,
            }
        });

        // transport header
        assert_eq!(
            self.transport,
            match result.transport.as_ref() {
                Some(TransportSlice::Icmpv4(actual)) =>
                    Some(TransportHeader::Icmpv4(actual.header())),
                Some(TransportSlice::Icmpv6(actual)) =>
                    Some(TransportHeader::Icmpv6(actual.header())),
                Some(TransportSlice::Udp(actual)) => Some(TransportHeader::Udp(actual.to_header())),
                Some(TransportSlice::Tcp(actual)) => Some(TransportHeader::Tcp(actual.to_header())),
                None => None,
            }
        );
        // additional check that no transport slice is present when none
        // is expected (kept in addition to the assert_eq above)
        if self.transport.is_none() {
            match result.transport.as_ref() {
                None => assert!(result.transport.is_none()),
                _ => unreachable!(),
            }
        }

        //payload
        // the payload is compared against the payload of the lowest
        // layer that was actually sliced
        match result.transport.as_ref() {
            Some(TransportSlice::Icmpv4(icmpv4)) => {
                assert_eq!(&self.payload[..], icmpv4.payload());
            }
            Some(TransportSlice::Icmpv6(icmpv6)) => {
                assert_eq!(&self.payload[..], icmpv6.payload());
            }
            Some(TransportSlice::Udp(udp)) => {
                assert_eq!(&self.payload[..], udp.payload());
            }
            Some(TransportSlice::Tcp(tcp)) => {
                assert_eq!(&self.payload[..], tcp.payload());
            }
            // check ip next
            None => {
                if let Some(ip) = result.net.as_ref() {
                    assert_eq!(
                        &self.payload[..],
                        match ip {
                            NetSlice::Ipv4(s) => s.payload.payload,
                            NetSlice::Ipv6(s) => s.payload.payload,
                        }
                    );
                } else {
                    if let Some(vlan) = result.vlan.as_ref() {
                        assert_eq!(&self.payload[..], vlan.payload().payload);
                    } else {
                        if let Some(LinkSlice::Ethernet2(eth)) = result.link.as_ref() {
                            assert_eq!(&self.payload[..], eth.payload().payload);
                        }
                    }
                }
            }
        }
    }
389
run_vlan( &self, outer_vlan: &SingleVlanHeader, inner_vlan: &SingleVlanHeader, ipv4: &Ipv4Header, ipv4_ext: &Ipv4Extensions, ipv6: &Ipv6Header, ipv6_ext: &Ipv6Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )390 fn run_vlan(
391 &self,
392 outer_vlan: &SingleVlanHeader,
393 inner_vlan: &SingleVlanHeader,
394 ipv4: &Ipv4Header,
395 ipv4_ext: &Ipv4Extensions,
396 ipv6: &Ipv6Header,
397 ipv6_ext: &Ipv6Extensions,
398 udp: &UdpHeader,
399 tcp: &TcpHeader,
400 icmpv4: &Icmpv4Header,
401 icmpv6: &Icmpv6Header,
402 ) {
403 let setup_single = |ether_type: EtherType| -> ComponentTest {
404 let mut result = self.clone();
405 result.vlan = Some(VlanHeader::Single({
406 let mut v = inner_vlan.clone();
407 v.ether_type = ether_type;
408 v
409 }));
410 result
411 };
412 let setup_double =
413 |outer_ether_type: EtherType, inner_ether_type: EtherType| -> ComponentTest {
414 let mut result = self.clone();
415 result.vlan = Some(VlanHeader::Double(DoubleVlanHeader {
416 outer: {
417 let mut v = outer_vlan.clone();
418 v.ether_type = outer_ether_type;
419 v
420 },
421 inner: {
422 let mut v = inner_vlan.clone();
423 v.ether_type = inner_ether_type;
424 v
425 },
426 }));
427 result
428 };
429
430 //single
431 setup_single(inner_vlan.ether_type).run();
432 setup_single(ether_type::IPV4).run_ipv4(ipv4, ipv4_ext, udp, tcp, icmpv4, icmpv6);
433 setup_single(ether_type::IPV6).run_ipv6(ipv6, ipv6_ext, udp, tcp, icmpv4, icmpv6);
434
435 //double
436 for ether_type in VLAN_ETHER_TYPES {
437 setup_double(*ether_type, inner_vlan.ether_type).run();
438 setup_double(*ether_type, ether_type::IPV4)
439 .run_ipv4(ipv4, ipv4_ext, udp, tcp, icmpv4, icmpv6);
440 setup_double(*ether_type, ether_type::IPV6)
441 .run_ipv6(ipv6, ipv6_ext, udp, tcp, icmpv4, icmpv6);
442 }
443 }
444
run_ipv4( &self, ip: &Ipv4Header, ip_exts: &Ipv4Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )445 fn run_ipv4(
446 &self,
447 ip: &Ipv4Header,
448 ip_exts: &Ipv4Extensions,
449 udp: &UdpHeader,
450 tcp: &TcpHeader,
451 icmpv4: &Icmpv4Header,
452 icmpv6: &Icmpv6Header,
453 ) {
454 // fragmenting
455 {
456 let mut test = self.clone();
457 test.ip = Some({
458 let mut frag = ip.clone();
459 if false == frag.is_fragmenting_payload() {
460 frag.more_fragments = true;
461 }
462 let mut header = IpHeaders::Ipv4(frag, ip_exts.clone());
463 header.set_next_headers(ip.protocol);
464 header
465 });
466
467 // run without transport header
468 test.run();
469 }
470
471 // non fragmenting
472 {
473 let mut test = self.clone();
474 test.ip = Some({
475 let mut non_frag = ip.clone();
476 non_frag.more_fragments = false;
477 non_frag.fragment_offset = 0.try_into().unwrap();
478 let mut header = IpHeaders::Ipv4(non_frag, ip_exts.clone());
479 header.set_next_headers(ip.protocol);
480 header
481 });
482 test.run_transport(udp, tcp, icmpv4, icmpv6);
483 }
484 }
485
run_ipv6( &self, ip: &Ipv6Header, ip_exts: &Ipv6Extensions, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )486 fn run_ipv6(
487 &self,
488 ip: &Ipv6Header,
489 ip_exts: &Ipv6Extensions,
490 udp: &UdpHeader,
491 tcp: &TcpHeader,
492 icmpv4: &Icmpv4Header,
493 icmpv6: &Icmpv6Header,
494 ) {
495 // fragmenting
496 {
497 let mut test = self.clone();
498 test.ip = Some({
499 let mut frag = ip_exts.clone();
500 if let Some(frag) = frag.fragment.as_mut() {
501 if false == frag.is_fragmenting_payload() {
502 frag.more_fragments = true;
503 }
504 } else {
505 frag.fragment = Some(Ipv6FragmentHeader::new(
506 ip_number::UDP,
507 IpFragOffset::ZERO,
508 true,
509 0,
510 ));
511 }
512 let mut header = IpHeaders::Ipv6(ip.clone(), frag);
513 header.set_next_headers(ip.next_header);
514 header
515 });
516 test.run();
517 }
518
519 // non fragmenting
520 {
521 let mut test = self.clone();
522 test.ip = Some({
523 let mut non_frag = ip_exts.clone();
524 non_frag.fragment = None;
525 let mut header = IpHeaders::Ipv6(ip.clone(), non_frag);
526 header.set_next_headers(ip.next_header);
527 header
528 });
529 test.run_transport(udp, tcp, icmpv4, icmpv6);
530 }
531 }
532
run_transport( &self, udp: &UdpHeader, tcp: &TcpHeader, icmpv4: &Icmpv4Header, icmpv6: &Icmpv6Header, )533 fn run_transport(
534 &self,
535 udp: &UdpHeader,
536 tcp: &TcpHeader,
537 icmpv4: &Icmpv4Header,
538 icmpv6: &Icmpv6Header,
539 ) {
540 // unknown transport layer
541 self.run();
542
543 // udp
544 {
545 let mut test = self.clone();
546 test.ip.as_mut().unwrap().set_next_headers(ip_number::UDP);
547 test.transport = Some(TransportHeader::Udp(udp.clone()));
548 test.run()
549 }
550
551 // tcp
552 {
553 let mut test = self.clone();
554 test.ip.as_mut().unwrap().set_next_headers(ip_number::TCP);
555 test.transport = Some(TransportHeader::Tcp(tcp.clone()));
556 test.run()
557 }
558
559 // icmpv4
560 if let Some(payload_size) = icmpv4.fixed_payload_size() {
561 let mut test = self.clone();
562 test.ip.as_mut().unwrap().set_next_headers(ip_number::ICMP);
563 test.transport = Some(TransportHeader::Icmpv4(icmpv4.clone()));
564 // resize the payload in case it does not have to be as big
565 test.payload.resize(payload_size, 0);
566 test.run()
567 } else {
568 let mut test = self.clone();
569 test.ip.as_mut().unwrap().set_next_headers(ip_number::ICMP);
570 test.transport = Some(TransportHeader::Icmpv4(icmpv4.clone()));
571 test.run()
572 }
573
574 // icmpv6
575 if let Some(payload_size) = icmpv6.fixed_payload_size() {
576 let mut test = self.clone();
577 test.ip
578 .as_mut()
579 .unwrap()
580 .set_next_headers(ip_number::IPV6_ICMP);
581 test.transport = Some(TransportHeader::Icmpv6(icmpv6.clone()));
582 // resize the payload in case it does not have to be as big
583 test.payload.resize(payload_size, 0);
584 test.run()
585 } else {
586 let mut test = self.clone();
587 test.ip
588 .as_mut()
589 .unwrap()
590 .set_next_headers(ip_number::IPV6_ICMP);
591 test.transport = Some(TransportHeader::Icmpv6(icmpv6.clone()));
592 test.run()
593 }
594 }
595 }
596
proptest! {
    ///Test that all known packet compositions are parsed correctly.
    #[test]
    #[cfg_attr(miri, ignore)] // vec allocation reduces miri runspeed too much
    fn test_compositions(ref eth in ethernet_2_unknown(),
                         ref vlan_outer in vlan_single_unknown(),
                         ref vlan_inner in vlan_single_unknown(),
                         ref ipv4 in ipv4_unknown(),
                         ref ipv4_exts in ipv4_extensions_unknown(),
                         ref ipv6 in ipv6_unknown(),
                         ref ipv6_exts in ipv6_extensions_unknown(),
                         ref udp in udp_any(),
                         ref tcp in tcp_any(),
                         ref icmpv4 in icmpv4_header_any(),
                         ref icmpv6 in icmpv6_header_any(),
                         ref payload in proptest::collection::vec(any::<u8>(), 0..1024))
    {
        // base test with only an ethernet2 header and the given ether type;
        // the other layers are added by the run_* methods
        let setup_eth = | ether_type: EtherType | -> ComponentTest {
            ComponentTest {
                payload: payload.clone(),
                link: Some({
                    let mut result = eth.clone();
                    result.ether_type = ether_type;
                    LinkHeader::Ethernet2(result)
                }),
                vlan: None,
                ip: None,
                transport: None
            }
        };

        //ethernet 2: standalone, ipv4, ipv6
        setup_eth(eth.ether_type).run();
        setup_eth(ether_type::IPV4).run_ipv4(ipv4, ipv4_exts, udp, tcp, icmpv4, icmpv6);
        setup_eth(ether_type::IPV6).run_ipv6(ipv6, ipv6_exts, udp, tcp, icmpv4, icmpv6);

        //vlans (single & double tagged, for every vlan ether type)
        for ether_type in VLAN_ETHER_TYPES {
            setup_eth(*ether_type).run_vlan(vlan_outer, vlan_inner, ipv4, ipv4_exts, ipv6, ipv6_exts, udp, tcp, icmpv4, icmpv6);
        }
    }
}
639
/// Ensure that `assert_sliced_packet` panics when the slicing result is
/// missing the expected ethernet header.
#[test]
#[should_panic]
fn test_packet_slicing_panics() {
    // slicing result without any layers
    let empty_result = SlicedPacket {
        link: None,
        vlan: None,
        net: None,
        transport: None,
    };
    // expectation that requires an ethernet2 link header
    let expectation = ComponentTest {
        link: Some(LinkHeader::Ethernet2(Ethernet2Header {
            source: [0; 6],
            destination: [0; 6],
            ether_type: 0.into(),
        })),
        vlan: None,
        ip: None,
        transport: None,
        payload: vec![],
    };
    // must panic: the sliced packet carries no link header
    expectation.assert_sliced_packet(empty_result);
}
663