/* chunkset_avx.c -- AVX inline functions to copy small data chunks.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */
#include "zbuild.h"

#ifdef X86_AVX_CHUNKSET
#include <immintrin.h>
#include "chunk_permute_table.h"

typedef __m256i chunk_t;

#define CHUNK_SIZE 32

#define HAVE_CHUNKMEMSET_2
#define HAVE_CHUNKMEMSET_4
#define HAVE_CHUNKMEMSET_8
#define HAVE_CHUNK_MAG

/* Populate the don't-care entries so that this is a direct lookup (with some indirection into the permute table).
 * Because dist can never be 0-2, the table starts at an offset and we subtract 3 from the input to get the index
 * (see the indexing example after the table). */
static const lut_rem_pair perm_idx_lut[29] = {
    { 0, 2},                /* 3 */
    { 0, 0},                /* don't care */
    { 1 * 32, 2},           /* 5 */
    { 2 * 32, 2},           /* 6 */
    { 3 * 32, 4},           /* 7 */
    { 0 * 32, 0},           /* don't care */
    { 4 * 32, 5},           /* 9 */
    { 5 * 32, 22},          /* 10 */
    { 6 * 32, 21},          /* 11 */
    { 7 * 32, 20},          /* 12 */
    { 8 * 32, 6},           /* 13 */
    { 9 * 32, 4},           /* 14 */
    {10 * 32, 2},           /* 15 */
    { 0 * 32, 0},           /* don't care */
    {11 * 32, 15},          /* 17 */
    {11 * 32 + 16, 14},     /* 18 */
    {11 * 32 + 16 * 2, 13}, /* 19 */
    {11 * 32 + 16 * 3, 12}, /* 20 */
    {11 * 32 + 16 * 4, 11}, /* 21 */
    {11 * 32 + 16 * 5, 10}, /* 22 */
    {11 * 32 + 16 * 6, 9},  /* 23 */
    {11 * 32 + 16 * 7, 8},  /* 24 */
    {11 * 32 + 16 * 8, 7},  /* 25 */
    {11 * 32 + 16 * 9, 6},  /* 26 */
    {11 * 32 + 16 * 10, 5}, /* 27 */
    {11 * 32 + 16 * 11, 4}, /* 28 */
    {11 * 32 + 16 * 12, 3}, /* 29 */
    {11 * 32 + 16 * 13, 2}, /* 30 */
    {11 * 32 + 16 * 14, 1}  /* 31 */
};
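
/* Indexing example: a distance of 7 reads perm_idx_lut[7 - 3] == {3 * 32, 4}, i.e. the permute pattern
 * starting at permute_table + 3 * 32 and a remainder value of 4 for the caller. The don't-care rows
 * (distances 4, 8 and 16) only pad the table so the lookup stays direct; their permute offsets should
 * never be consumed, since those distances are handled without a permute lookup. */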

/* Fill a chunk with copies of the 16-bit value at "from". */
static inline void chunkmemset_2(uint8_t *from, chunk_t *chunk) {
    int16_t tmp;
    zmemcpy_2(&tmp, from);
    *chunk = _mm256_set1_epi16(tmp);
}

/* Fill a chunk with copies of the 32-bit value at "from". */
static inline void chunkmemset_4(uint8_t *from, chunk_t *chunk) {
    int32_t tmp;
    zmemcpy_4(&tmp, from);
    *chunk = _mm256_set1_epi32(tmp);
}

/* Fill a chunk with copies of the 64-bit value at "from". */
static inline void chunkmemset_8(uint8_t *from, chunk_t *chunk) {
    int64_t tmp;
    zmemcpy_8(&tmp, from);
    *chunk = _mm256_set1_epi64x(tmp);
}

/* Unaligned 32-byte load from "s" into the chunk. */
static inline void loadchunk(uint8_t const *s, chunk_t *chunk) {
    *chunk = _mm256_loadu_si256((__m256i *)s);
}

/* Unaligned 32-byte store of the chunk to "out". */
static inline void storechunk(uint8_t *out, chunk_t *chunk) {
    _mm256_storeu_si256((__m256i *)out, *chunk);
}

/* Return a 32-byte chunk filled by repeating the dist-byte pattern that starts at buf (wrapping within the
 * register), and pass the table's remainder value back through chunk_rem. dist is expected to be within the
 * 3..31 range covered by perm_idx_lut. */
static inline chunk_t GET_CHUNK_MAG(uint8_t *buf, uint32_t *chunk_rem, uint32_t dist) {
    lut_rem_pair lut_rem = perm_idx_lut[dist - 3];
    __m256i ret_vec;
    /* While technically we only need to read 4 or 8 bytes into this vector register for a lot of cases, GCC is
     * compiling this to a shared load for all branches, preferring the simpler code. Given that the buf value isn't
     * in GPRs to begin with, the 256-bit load is _probably_ just as inexpensive. */
    *chunk_rem = lut_rem.remval;

#ifdef Z_MEMORY_SANITIZER
    /* See note in chunkset_sse4.c for why this is ok */
    __msan_unpoison(buf + dist, 32 - dist);
#endif

    if (dist < 16) {
        /* This simpler case still requires us to shuffle in 128-bit lanes, so we must apply a static offset after
         * broadcasting the first vector register to both halves. This is _marginally_ faster than doing two separate
         * shuffles and combining the halves later. */
        const __m256i permute_xform =
            _mm256_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                             16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
        __m256i perm_vec = _mm256_load_si256((__m256i*)(permute_table + lut_rem.idx));
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        perm_vec = _mm256_add_epi8(perm_vec, permute_xform);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
        ret_vec = _mm256_shuffle_epi8(ret_vec, perm_vec);
    } else if (dist == 16) {
        /* A 16-byte pattern fills the chunk exactly twice: duplicate the 128-bit load into both halves. */
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        return _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), ret_vec0, 1);
    } else {
        __m128i ret_vec0 = _mm_loadu_si128((__m128i*)buf);
        __m128i ret_vec1 = _mm_loadu_si128((__m128i*)(buf + 16));
        /* Take advantage of the fact that only the latter half of the 256-bit vector will actually differ. */
        __m128i perm_vec1 = _mm_load_si128((__m128i*)(permute_table + lut_rem.idx));
        __m128i xlane_permutes = _mm_cmpgt_epi8(_mm_set1_epi8(16), perm_vec1);
        __m128i xlane_res = _mm_shuffle_epi8(ret_vec0, perm_vec1);
        /* Since we can't wrap twice, we can simply keep the latter half exactly how it is instead of having
         * to _also_ shuffle those values. */
        __m128i latter_half = _mm_blendv_epi8(ret_vec1, xlane_res, xlane_permutes);
        ret_vec = _mm256_inserti128_si256(_mm256_castsi128_si256(ret_vec0), latter_half, 1);
    }

    return ret_vec;
}
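
/* For illustration: assuming chunk_permute_table.h lays out repeating byte indices, a call with dist == 3
 * and buf pointing at the bytes {1, 2, 3} should yield a chunk holding 1,2,3,1,2,3,... across all 32 bytes
 * (the last two bytes wrapping back to 1,2), with *chunk_rem set to 2 from the table. */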

#define CHUNKSIZE        chunksize_avx
#define CHUNKCOPY        chunkcopy_avx
#define CHUNKCOPY_SAFE   chunkcopy_safe_avx
#define CHUNKUNROLL      chunkunroll_avx
#define CHUNKMEMSET      chunkmemset_avx
#define CHUNKMEMSET_SAFE chunkmemset_safe_avx

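/* chunkset_tpl.h is expected to expand to the chunksize_avx, chunkcopy_avx, chunkcopy_safe_avx,
 * chunkunroll_avx, chunkmemset_avx and chunkmemset_safe_avx functions named above, built on top of the
 * chunk primitives defined in this file. */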
#include "chunkset_tpl.h"

#endif