/* adler32_avx512_tpl.h -- adler32 avx512 vectorized function templates
 * Copyright (C) 2022 Adam Stylinski
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "../../zbuild.h"
#include "../../adler32_p.h"
#include "../../adler32_fold.h"
#include "../../cpu_features.h"
#include "../../fallback_builtins.h"
#include <immintrin.h>
#include "adler32_avx512_p.h"

#ifdef X86_AVX512_ADLER32

#ifdef COPY
Z_INTERNAL uint32_t adler32_fold_copy_avx512(uint32_t adler, uint8_t *dst, const uint8_t *src, size_t len) {
#else
Z_INTERNAL uint32_t adler32_avx512(uint32_t adler, const uint8_t *src, size_t len) {
#endif

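    /* Match zlib's adler32() semantics: a NULL buffer returns the initial
     * Adler-32 value (1). */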
    if (src == NULL) return 1L;
    if (len == 0) return adler;

    uint32_t adler0, adler1;
    adler1 = (adler >> 16) & 0xffff;
    adler0 = adler & 0xffff;

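/* Entered directly for short inputs, and again via goto for the
 * sub-64-byte tail left over by the vectorized loop below. */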
rem_peel:
    if (len < 64) {
        /* Handle the sub-64-byte tail: do the masked copy here, then hand
         * the remaining checksum work to a narrower implementation. */
#ifdef COPY
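        /* A mask with the low len bits set (len is 1..63 here) confines the
         * load and store to exactly len bytes, so we never touch memory
         * past either buffer. */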
        __mmask64 storemask = (0xFFFFFFFFFFFFFFFFUL >> (64 - len));
        __m512i copy_vec = _mm512_maskz_loadu_epi8(storemask, src);
        _mm512_mask_storeu_epi8(dst, storemask, copy_vec);
#endif

#ifdef X86_AVX2_ADLER32
        return adler32_avx2(adler, src, len);
#elif defined(X86_SSSE3_ADLER32)
        return adler32_ssse3(adler, src, len);
#else
        return adler32_len_16(adler0, src, len, adler1);
#endif
    }

    __m512i vbuf, vs1_0, vs3;

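    /* dot2v holds the byte weights 64..1: _mm512_set_epi8 takes its
     * arguments most-significant byte first, so the first byte loaded from
     * memory is weighted by 64 and the last by 1.  dot3v (all ones) lets
     * _mm512_madd_epi16 pair-sum 16-bit products into 32-bit lanes. */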
    const __m512i dot2v = _mm512_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
                                          20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
                                          38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
                                          56, 57, 58, 59, 60, 61, 62, 63, 64);
    const __m512i dot3v = _mm512_set1_epi16(1);
    const __m512i zero = _mm512_setzero_si512();
    size_t k;

    while (len >= 64) {
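        /* Seed lane 0 of vs1/vs2 with the running scalar sums; every other
         * lane starts out zeroed. */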
        __m512i vs1 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler0));
        __m512i vs2 = _mm512_zextsi128_si512(_mm_cvtsi32_si128(adler1));
        vs1_0 = vs1;
        vs3 = _mm512_setzero_si512();

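        /* NMAX (5552) is the largest number of bytes that can be summed
         * before the 32-bit accumulators could overflow; round each chunk
         * down to whole 64-byte blocks for the inner loop. */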
        k = MIN(len, NMAX);
        k -= k % 64;
        len -= k;

        while (k >= 64) {
            /*
               vs1 = adler + sum(c[i])
               vs2 = sum2 + 64 vs1 + sum( (64-i+1) c[i] )
            */
            vbuf = _mm512_loadu_si512(src);
#ifdef COPY
            _mm512_storeu_si512(dst, vbuf);
            dst += 64;
#endif
            src += 64;
            k -= 64;

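            /* _mm512_sad_epu8 against zero horizontally sums each group of
             * 8 bytes into a 64-bit lane: a cheap way to add all 64 bytes
             * into vs1.  maddubs multiplies the bytes by their 64..1
             * weights and adds adjacent pairs into 16-bit lanes; madd with
             * dot3v then widens those to 32 bits for vs2. */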
            __m512i vs1_sad = _mm512_sad_epu8(vbuf, zero);
            __m512i v_short_sum2 = _mm512_maddubs_epi16(vbuf, dot2v);
            vs1 = _mm512_add_epi32(vs1_sad, vs1);
            vs3 = _mm512_add_epi32(vs3, vs1_0);
            __m512i vsum2 = _mm512_madd_epi16(v_short_sum2, dot3v);
            vs2 = _mm512_add_epi32(vsum2, vs2);
            vs1_0 = vs1;
        }

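        /* vs3 accumulated the pre-block value of vs1 once per 64-byte
         * iteration; shifting left by 6 multiplies it by 64, supplying the
         * deferred "64 vs1" term of the vs2 recurrence above. */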
        vs3 = _mm512_slli_epi32(vs3, 6);
        vs2 = _mm512_add_epi32(vs2, vs3);

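        /* _mm512_sad_epu8 leaves only the even 32-bit lanes of vs1 nonzero,
         * so partial_hsum (adler32_avx512_p.h) sums just those eight lanes;
         * vs2 needs the full reduction.  Fold modulo BASE once per
         * NMAX-sized chunk. */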
        adler0 = partial_hsum(vs1) % BASE;
        adler1 = _mm512_reduce_add_epu32(vs2) % BASE;
    }

    adler = adler0 | (adler1 << 16);

    /* Process tail (len < 64). */
    if (len) {
        goto rem_peel;
    }

    return adler;
}

#endif