/* adler32_avx2_p.h -- adler32 avx2 utility functions
 * Copyright (C) 2022 Adam Stylinski
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#ifndef ADLER32_AVX2_P_H_
#define ADLER32_AVX2_P_H_

#if defined(X86_AVX2_ADLER32) || defined(X86_AVX512VNNI_ADLER32)

/* 32 bit horizontal sum, adapted from Agner Fog's vector library. */
hsum256(__m256i x)12 static inline uint32_t hsum256(__m256i x) {
13     __m128i sum1  = _mm_add_epi32(_mm256_extracti128_si256(x, 1),
14                                   _mm256_castsi256_si128(x));
15     __m128i sum2  = _mm_add_epi32(sum1, _mm_unpackhi_epi64(sum1, sum1));
16     __m128i sum3  = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 1));
17     return (uint32_t)_mm_cvtsi128_si32(sum3);
18 }
partial_hsum256(__m256i x)20 static inline uint32_t partial_hsum256(__m256i x) {
21     /* We need a permutation vector to extract every other integer. The
22      * rest are going to be zeros */
23     const __m256i perm_vec = _mm256_setr_epi32(0, 2, 4, 6, 1, 1, 1, 1);
24     __m256i non_zero = _mm256_permutevar8x32_epi32(x, perm_vec);
25     __m128i non_zero_sse = _mm256_castsi256_si128(non_zero);
26     __m128i sum2  = _mm_add_epi32(non_zero_sse,_mm_unpackhi_epi64(non_zero_sse, non_zero_sse));
27     __m128i sum3  = _mm_add_epi32(sum2, _mm_shuffle_epi32(sum2, 1));
28     return (uint32_t)_mm_cvtsi128_si32(sum3);
29 }
#endif

#endif