use crate::iter::Bytes;

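/// Advances `bytes` past all leading URI characters, processing 32-byte
/// chunks with AVX2 and handing any tail shorter than 32 bytes to the SWAR
/// implementation.
///
/// # Safety
///
/// The caller must ensure the running CPU supports AVX2.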
#[inline]
#[target_feature(enable = "avx2")]
pub unsafe fn match_uri_vectored(bytes: &mut Bytes) {
    while bytes.as_ref().len() >= 32 {
        let advance = match_url_char_32_avx(bytes.as_ref());
        bytes.advance(advance);

        // A partial advance means a disallowed byte was found in this chunk.
        if advance != 32 {
            return;
        }
    }
    // NOTE: use SWAR for <32B, more efficient than falling back to SSE4.2
    super::swar::match_uri_vectored(bytes)
}

#[inline(always)]
#[allow(non_snake_case, overflowing_literals)]
#[allow(unused)]
unsafe fn match_url_char_32_avx(buf: &[u8]) -> usize {
    debug_assert!(buf.len() >= 32);

    #[cfg(target_arch = "x86")]
    use core::arch::x86::*;
    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    let ptr = buf.as_ptr();

    // 0x0f repeated 32 times; used below to isolate each byte's high nibble.
    let LSH: __m256i = _mm256_set1_epi8(0x0f);

    // See comment in sse42::match_url_char_16_sse.
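    // In short: URI is a bitmap table indexed by each byte's low nibble,
    // where bit N of an entry marks high nibble N as allowed for that
    // column, and ARF maps a high nibble to its single-bit mask. ANDing
    // the two lookups is non-zero exactly when the byte is an allowed URI
    // character. Bytes with the top bit set zero out the URI lookup
    // (pshufb semantics) and index the zeroed upper half of ARF, so they
    // are rejected.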

    let URI: __m256i = _mm256_setr_epi8(
        0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
        0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c,
        0xf8, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
        0xfc, 0xfc, 0xfc, 0xfc, 0xf4, 0xfc, 0xf4, 0x7c,
    );
    let ARF: __m256i = _mm256_setr_epi8(
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    );

    let data = _mm256_lddqu_si256(ptr as *const _);
    // Row bitmasks: look up each byte's low nibble in URI (bytes with the
    // high bit set produce zero, per pshufb semantics).
    let rbms = _mm256_shuffle_epi8(URI, data);
    // Columns: each byte's high nibble.
    let cols = _mm256_and_si256(LSH, _mm256_srli_epi16(data, 4));
    // Non-zero exactly where the byte is an allowed URI character.
    let bits = _mm256_and_si256(_mm256_shuffle_epi8(ARF, cols), rbms);

    // 0xff for disallowed bytes, 0x00 for allowed ones.
    let v = _mm256_cmpeq_epi8(bits, _mm256_setzero_si256());
    let r = _mm256_movemask_epi8(v) as u32;

    // Count of allowed bytes before the first disallowed one (32 if none).
    r.trailing_zeros() as usize
}

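/// Advances `bytes` past all leading header-value characters, processing
/// 32-byte chunks with AVX2 and handing any tail shorter than 32 bytes to
/// the SWAR implementation.
///
/// # Safety
///
/// The caller must ensure the running CPU supports AVX2.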
#[target_feature(enable = "avx2")]
pub unsafe fn match_header_value_vectored(bytes: &mut Bytes) {
    while bytes.as_ref().len() >= 32 {
        let advance = match_header_value_char_32_avx(bytes.as_ref());
        bytes.advance(advance);

        // A partial advance means a disallowed byte was found in this chunk.
        if advance != 32 {
            return;
        }
    }
    // NOTE: use SWAR for <32B, more efficient than falling back to SSE4.2
    super::swar::match_header_value_vectored(bytes)
}

#[inline(always)]
#[allow(non_snake_case)]
#[allow(unused)]
unsafe fn match_header_value_char_32_avx(buf: &[u8]) -> usize {
    debug_assert!(buf.len() >= 32);

    #[cfg(target_arch = "x86")]
    use core::arch::x86::*;
    #[cfg(target_arch = "x86_64")]
    use core::arch::x86_64::*;

    let ptr = buf.as_ptr();

    // %x09 %x20-%x7e %x80-%xff
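    // i.e. HTAB, printable ASCII, and obs-text. The mask below is built
    // from three comparisons: dat >= 0x20 unsigned (which also admits
    // 0x80-0xff), dat == HTAB, and dat == DEL, which is carved back out.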
    let TAB: __m256i = _mm256_set1_epi8(0x09);
    let DEL: __m256i = _mm256_set1_epi8(0x7f);
    let LOW: __m256i = _mm256_set1_epi8(0x20);

    let dat = _mm256_lddqu_si256(ptr as *const _);
    // unsigned comparison dat >= LOW
    let low = _mm256_cmpeq_epi8(_mm256_max_epu8(dat, LOW), dat);
    let tab = _mm256_cmpeq_epi8(dat, TAB);
    let del = _mm256_cmpeq_epi8(dat, DEL);
    // allowed = (low | tab) & !del
    let bit = _mm256_andnot_si256(del, _mm256_or_si256(low, tab));
    let res = _mm256_movemask_epi8(bit) as u32;
    // TODO: use .trailing_ones() once MSRV >= 1.46
    (!res).trailing_zeros() as usize
}

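// A minimal sketch of how a caller might select these functions at runtime
// (hypothetical; the real dispatch lives elsewhere in this crate):
//
//     if is_x86_feature_detected!("avx2") {
//         // SAFETY: AVX2 support was just verified.
//         unsafe { match_uri_vectored(&mut bytes) };
//     } else {
//         super::swar::match_uri_vectored(&mut bytes);
//     }
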
#[test]
fn avx2_code_matches_uri_chars_table() {
    if !is_x86_feature_detected!("avx2") {
        return;
    }

    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
        assert!(byte_is_allowed(b'_', match_uri_vectored));

        for (b, allowed) in crate::URI_MAP.iter().cloned().enumerate() {
            assert_eq!(
                byte_is_allowed(b as u8, match_uri_vectored), allowed,
                "byte_is_allowed({:?}) should be {:?}", b, allowed,
            );
        }
    }
}

#[test]
fn avx2_code_matches_header_value_chars_table() {
    if !is_x86_feature_detected!("avx2") {
        return;
    }

    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
        assert!(byte_is_allowed(b'_', match_header_value_vectored));

        for (b, allowed) in crate::HEADER_VALUE_MAP.iter().cloned().enumerate() {
            assert_eq!(
                byte_is_allowed(b as u8, match_header_value_vectored), allowed,
                "byte_is_allowed({:?}) should be {:?}", b, allowed,
            );
        }
    }
}

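/// Test helper: runs `f` over a 32-byte buffer of `b'_'` (allowed in both
/// tables, as asserted by the tests above) with the probe byte at index 26,
/// then reports whether the matcher consumed the whole buffer (probe
/// allowed) or stopped at index 26 (probe rejected).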
#[cfg(test)]
unsafe fn byte_is_allowed(byte: u8, f: unsafe fn(bytes: &mut Bytes<'_>)) -> bool {
    let slice = [
        b'_', b'_', b'_', b'_',
        b'_', b'_', b'_', b'_',
        b'_', b'_', b'_', b'_',
        b'_', b'_', b'_', b'_',
        b'_', b'_', b'_', b'_',
        b'_', b'_', b'_', b'_',
        b'_', b'_', byte, b'_',
        b'_', b'_', b'_', b'_',
    ];
    let mut bytes = Bytes::new(&slice);

    f(&mut bytes);

    match bytes.pos() {
        32 => true,
        26 => false,
        _ => unreachable!(),
    }
}