// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx512skx-vpshufb.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>

void xnn_x8_lut_ukernel__avx512skx_vpshufb_x256(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

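  // Load the 256-entry table as sixteen 16-byte rows, each broadcast to all
  // four 128-bit lanes of a ZMM register, since VPSHUFB indexes within each
  // 128-bit lane independently.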
  const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) t));
  const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 16)));
  const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 32)));
  const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48)));
  const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 64)));
  const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 80)));
  const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 96)));
  const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 112)));
  const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 128)));
  const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 144)));
  const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 160)));
  const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 176)));
  const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 192)));
  const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 208)));
  const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 224)));
  const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 240)));

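  // Fold adjacent rows together with XOR: a lookup then accumulates hits from
  // several rows, and the shared vtN terms telescope away, leaving t[x].
  // Rows 8..F also fold in rows 0..7 to cancel the stray terms picked up when
  // an index of 128 or more first comes into range partway down the chain.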
  const __m512i vtable0 = vt0;
  const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
  const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
  const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
  const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
  const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
  const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
  const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
  const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
  const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
  const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
  const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
  const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
  const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
  const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
  const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);

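  // Step the indices down by 16 between rows; once an index goes negative,
  // its bit 7 is set and VPSHUFB yields zero for that byte. The main loop
  // processes 256 bytes per iteration in four ZMM registers.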
  const __m512i voffset = _mm512_set1_epi8(16);
  for (; n >= 256 * sizeof(uint8_t); n -= 256 * sizeof(uint8_t)) {
    __m512i vx0 = _mm512_loadu_si512(x);
    __m512i vx1 = _mm512_loadu_si512(x + 64);
    __m512i vx2 = _mm512_loadu_si512(x + 128);
    __m512i vx3 = _mm512_loadu_si512(x + 192);
    x += 256;

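    // VPSHUFB zeroes any byte whose index has bit 7 set and otherwise selects
    // by the low 4 bits within the lane, so each lookup below hits only for
    // indices currently in range; XOR folds the hits into the final value.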
    __m512i vy0 = _mm512_shuffle_epi8(vtable0, vx0);
    __m512i vy1 = _mm512_shuffle_epi8(vtable0, vx1);
    __m512i vy2 = _mm512_shuffle_epi8(vtable0, vx2);
    __m512i vy3 = _mm512_shuffle_epi8(vtable0, vx3);

    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable1, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable1, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable1, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable1, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable2, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable2, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable2, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable2, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable3, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable3, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable3, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable3, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable4, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable4, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable4, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable4, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable5, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable5, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable5, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable5, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable6, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable6, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable6, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable6, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable7, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable7, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable7, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable7, vx3));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vx2 = _mm512_sub_epi8(vx2, voffset);
    vx3 = _mm512_sub_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable8, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable8, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable8, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable8, vx3));

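    // From table 9 on, use saturating subtraction: a wrapping subtract could
    // carry an index that has already bottomed out at -128 back into the
    // positive range and produce a spurious hit, while saturation pins it at
    // -128, where VPSHUFB keeps returning zero.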
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable9, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable9, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtable9, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtable9, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableA, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableA, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableA, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableA, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableB, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableB, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableB, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableB, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableC, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableC, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableC, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableC, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableD, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableD, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableD, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableD, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableE, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableE, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableE, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableE, vx3));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vx2 = _mm512_subs_epi8(vx2, voffset);
    vx3 = _mm512_subs_epi8(vx3, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableF, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableF, vx1));
    vy2 = _mm512_xor_si512(vy2, _mm512_shuffle_epi8(vtableF, vx2));
    vy3 = _mm512_xor_si512(vy3, _mm512_shuffle_epi8(vtableF, vx3));

    _mm512_storeu_si512(y, vy0);
    _mm512_storeu_si512(y + 64, vy1);
    _mm512_storeu_si512(y + 128, vy2);
    _mm512_storeu_si512(y + 192, vy3);
    y += 256;
  }
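  // Tail loop: the same folded-table lookup on one 64-byte vector at a time.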
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m512i vx = _mm512_loadu_si512(x);
    x += 64;

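    // Walk the sixteen folded rows exactly as in the main loop.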
    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_storeu_si512(y, vy);
    y += 64;
  }
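  // Remainder of fewer than 64 bytes: handled with masked load and store.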
  if XNN_UNLIKELY(n != 0) {
    assert(n < 64);
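    // A mask with the low n bits set selects just the remaining bytes; lanes
    // past n are zero-filled on load and discarded by the masked store.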
    const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << n) - UINT64_C(1)));

    __m512i vx = _mm512_maskz_loadu_epi8(vmask, x);

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_mask_storeu_epi8(y, vmask, vy);
  }
}