use std::sync::atomic::{AtomicU8, Ordering};
use crate::iter::Bytes;
use super::avx2;
use super::sse42;

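// Sentinel values cached in `RUNTIME_FEATURE`; 0 is reserved for "not yet detected".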
const AVX2: u8 = 1;
const SSE42: u8 = 2;
const NOP: u8 = 3;

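/// Probes the CPU for the best available SIMD implementation, preferring
/// AVX2, then SSE4.2, and otherwise selecting the scalar SWAR fallback.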
fn detect_runtime_feature() -> u8 {
    if is_x86_feature_detected!("avx2") {
        AVX2
    } else if is_x86_feature_detected!("sse4.2") {
        SSE42
    } else {
        NOP
    }
}

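// Cached result of feature detection; starts at 0 (undetected) and is
// filled in lazily by `get_runtime_feature`.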
static RUNTIME_FEATURE: AtomicU8 = AtomicU8::new(0);

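/// Returns the cached feature id, running detection on first use. Relaxed
/// ordering is sufficient here: detection is idempotent, so concurrent
/// callers may race on the first call but will store the same value.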
#[inline]
fn get_runtime_feature() -> u8 {
    let mut feature = RUNTIME_FEATURE.load(Ordering::Relaxed);
    if feature == 0 {
        feature = detect_runtime_feature();
        RUNTIME_FEATURE.store(feature, Ordering::Relaxed);
    }

    feature
}

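/// Header names always go through the scalar SWAR matcher; no runtime
/// dispatch is performed for this case.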
pub fn match_header_name_vectored(bytes: &mut Bytes) {
    super::swar::match_header_name_vectored(bytes);
}

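/// Dispatches URI matching to the AVX2, SSE4.2, or scalar SWAR
/// implementation based on the CPU features detected at runtime.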
pub fn match_uri_vectored(bytes: &mut Bytes) {
    // SAFETY: calls are guarded by a feature check
    unsafe {
        match get_runtime_feature() {
            AVX2 => avx2::match_uri_vectored(bytes),
            SSE42 => sse42::match_uri_vectored(bytes),
            _ /* NOP */ => super::swar::match_uri_vectored(bytes),
        }
    }
}

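/// Dispatches header-value matching the same way: AVX2, then SSE4.2,
/// then the scalar SWAR fallback.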
pub fn match_header_value_vectored(bytes: &mut Bytes) {
    // SAFETY: calls are guarded by a feature check
    unsafe {
        match get_runtime_feature() {
            AVX2 => avx2::match_header_value_vectored(bytes),
            SSE42 => sse42::match_header_value_vectored(bytes),
            _ /* NOP */ => super::swar::match_header_value_vectored(bytes),
        }
    }
}