// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>

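// Applies a 256-entry byte lookup table to a stream of uint8_t values using
// AVX2 VPSHUFB: the table is split into sixteen 16-byte blocks whose lookups
// are combined with XORs, processing 32 input bytes per main-loop iteration.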
void xnn_x8_lut_ukernel__avx2_x32(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

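  // Load the 256-entry table as sixteen 16-byte blocks, each broadcast into
  // both 128-bit lanes so that the in-lane VPSHUFB sees the same 16 entries
  // in either half of the register.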
  const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) t));
  const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 16)));
  const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 32)));
  const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48)));
  const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 64)));
  const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 80)));
  const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 96)));
  const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 112)));
  const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 128)));
  const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 144)));
  const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 160)));
  const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 176)));
  const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 192)));
  const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 208)));
  const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 224)));
  const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 240)));

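  // Pre-XOR adjacent blocks: XOR-accumulating the shuffle results of
  // successive blocks telescopes to the single block that contains each index.
  // Blocks 8-15 additionally fold in vtable0-vtable7 to cancel the residual
  // terms picked up by indices in the upper half of the table.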
  const __m256i vtable0 = vt0;
  const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
  const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
  const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
  const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
  const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
  const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
  const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
  const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
  const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
  const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
  const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
  const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
  const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
  const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
  const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);

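  // Main loop: 32 bytes per iteration. VPSHUFB returns 0 for index bytes with
  // the sign bit set and otherwise selects with the low 4 bits, so repeatedly
  // subtracting 16 and XOR-ing the shuffle results picks out the correct
  // 16-byte block for every input byte.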
  const __m256i voffset = _mm256_set1_epi8(16);
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    __m256i vx0 = _mm256_loadu_si256((const __m256i*) x);
    x += 32;

    __m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);

    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));

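    // Switch to saturating subtraction: indices that have already gone
    // negative must stay negative (and keep producing 0 from VPSHUFB) rather
    // than wrap back into the positive range.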
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));

    _mm256_storeu_si256((__m256i*) y, vy0);
    y += 32;
  }
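  // Remainder loop: process 16 bytes at a time using the lower 128-bit halves
  // of the same tables.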
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
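  // Final 1-15 bytes: load a full 16-byte vector, run the same lookup, and
  // store only the bytes that remain.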
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

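    // Store the remaining bytes in 8/4/2/1-byte pieces, shifting the written
    // bytes out of the register after each partial store.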
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}