// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__avx2_x128(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

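  // The 256-entry lookup table is loaded as 16 aligned 16-byte chunks, each broadcast to both
  // 128-bit lanes of a 256-bit register so that VPSHUFB sees the same 16 entries in every lane.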
  const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) t));
  const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 16)));
  const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 32)));
  const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48)));
  const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 64)));
  const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 80)));
  const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 96)));
  const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 112)));
  const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 128)));
  const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 144)));
  const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 160)));
  const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 176)));
  const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 192)));
  const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 208)));
  const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 224)));
  const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 240)));

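  // XOR-fold adjacent 16-byte chunks. Lookups below are accumulated with XOR, so the per-step
  // contributions telescope and an input byte in chunk j collapses to t[16*j + (x & 15)].
  // Indices 128..255 appear "negative" to VPSHUFB during the earliest steps and therefore only
  // pick up the last eight terms of the chain, which is why chunks 8..F are additionally folded
  // with chunks 0..7.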
  const __m256i vtable0 = vt0;
  const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
  const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
  const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
  const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
  const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
  const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
  const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
  const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
  const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
  const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
  const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
  const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
  const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
  const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
  const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);

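  // Main loop: 128 bytes per iteration. _mm256_shuffle_epi8 zeroes any output byte whose index
  // byte has its most significant bit set, and otherwise indexes with the low 4 bits within each
  // 128-bit lane; subtracting the offset of 16 before each step shifts which input values
  // contribute at that step.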
  const __m256i voffset = _mm256_set1_epi8(16);
  for (; n >= 128 * sizeof(uint8_t); n -= 128 * sizeof(uint8_t)) {
    __m256i vx0 = _mm256_loadu_si256((const __m256i*) x);
    __m256i vx1 = _mm256_loadu_si256((const __m256i*) (x + 32));
    __m256i vx2 = _mm256_loadu_si256((const __m256i*) (x + 64));
    __m256i vx3 = _mm256_loadu_si256((const __m256i*) (x + 96));
    x += 128;

    __m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
    __m256i vy1 = _mm256_shuffle_epi8(vtable0, vx1);
    __m256i vy2 = _mm256_shuffle_epi8(vtable0, vx2);
    __m256i vy3 = _mm256_shuffle_epi8(vtable0, vx3);

    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable1, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable1, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable1, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable2, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable2, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable2, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable3, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable3, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable3, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable4, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable4, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable4, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable5, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable5, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable5, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable6, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable6, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable6, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable7, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable7, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable7, vx3));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vx2 = _mm256_sub_epi8(vx2, voffset);
    vx3 = _mm256_sub_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable8, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable8, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable8, vx3));

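    // Switch to saturating subtraction: a plain subtraction could wrap an index that has already
    // dropped below -128 back into the non-negative range and add a spurious contribution;
    // saturating at -128 keeps the sign bit set once an input byte has been fully resolved.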
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable9, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtable9, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtable9, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableA, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableA, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableA, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableB, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableB, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableB, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableC, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableC, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableC, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableD, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableD, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableD, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableE, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableE, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableE, vx3));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vx2 = _mm256_subs_epi8(vx2, voffset);
    vx3 = _mm256_subs_epi8(vx3, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableF, vx1));
    vy2 = _mm256_xor_si256(vy2, _mm256_shuffle_epi8(vtableF, vx2));
    vy3 = _mm256_xor_si256(vy3, _mm256_shuffle_epi8(vtableF, vx3));

    _mm256_storeu_si256((__m256i*) y, vy0);
    _mm256_storeu_si256((__m256i*) (y + 32), vy1);
    _mm256_storeu_si256((__m256i*) (y + 64), vy2);
    _mm256_storeu_si256((__m256i*) (y + 96), vy3);
    y += 128;
  }
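  // Handle remaining full 16-byte blocks with the SSE lower halves of the broadcast tables.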
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
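  // Final 1..15 bytes: perform one more full 16-byte lookup, then store only the requested
  // prefix in 8/4/2/1-byte pieces selected by the low bits of n.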
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}