// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx512skx-vpshufb.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


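// Applies the 256-entry lookup table t to n bytes from x and writes the
// translated bytes to y. VPSHUFB can only index 16 table entries per byte,
// so the full table is handled as sixteen XOR-folded 16-byte sub-tables
// (see the comments below).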
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x128(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

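  // Load the 256-entry table as sixteen 16-byte sub-tables, broadcasting
  // each one to all four 128-bit lanes: _mm512_shuffle_epi8 (VPSHUFB)
  // shuffles within 128-bit lanes only, so every lane needs a full copy.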
  const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) t));
  const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 16)));
  const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 32)));
  const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48)));
  const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 64)));
  const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 80)));
  const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 96)));
  const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 112)));
  const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 128)));
  const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 144)));
  const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 160)));
  const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 176)));
  const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 192)));
  const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 208)));
  const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 224)));
  const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 240)));

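  // XOR-fold adjacent sub-tables so the per-sub-table lookups telescope.
  // VPSHUFB returns 0 for any index byte whose high bit is set; as the
  // index is stepped down by 16 before each lookup, XOR-ing the successive
  // results cancels every term except the one from the sub-table the index
  // actually falls in. Sub-tables 8-15 additionally fold in vtable0-vtable7
  // to cancel the extra terms picked up by input bytes >= 128, whose high
  // bit only clears partway through the stepping sequence.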
  const __m512i vtable0 = vt0;
  const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
  const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
  const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
  const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
  const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
  const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
  const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
  const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
  const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
  const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
  const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
  const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
  const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
  const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
  const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);

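  // voffset is the 16-entry stride between consecutive sub-tables. The main
  // loop handles 128 bytes per iteration as two independent 64-byte vectors
  // to expose more instruction-level parallelism.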
  const __m512i voffset = _mm512_set1_epi8(16);
  for (; n >= 128 * sizeof(uint8_t); n -= 128 * sizeof(uint8_t)) {
    __m512i vx0 = _mm512_loadu_si512(x);
    __m512i vx1 = _mm512_loadu_si512(x + 64);
    x += 128;

    __m512i vy0 = _mm512_shuffle_epi8(vtable0, vx0);
    __m512i vy1 = _mm512_shuffle_epi8(vtable0, vx1);

    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable1, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable1, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable2, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable2, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable3, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable3, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable4, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable4, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable5, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable5, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable6, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable6, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable7, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable7, vx1));
    vx0 = _mm512_sub_epi8(vx0, voffset);
    vx1 = _mm512_sub_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable8, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable8, vx1));

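    // Switch from wrapping to saturating subtraction for the remaining
    // sub-tables: after 8 steps an index can reach -128, and one more
    // wrapping subtraction would turn it non-negative again, yielding a
    // bogus lookup. Saturation pins it at -128, so the high bit stays set
    // and VPSHUFB keeps contributing zero.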
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtable9, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtable9, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableA, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableA, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableB, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableB, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableC, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableC, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableD, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableD, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableE, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableE, vx1));
    vx0 = _mm512_subs_epi8(vx0, voffset);
    vx1 = _mm512_subs_epi8(vx1, voffset);
    vy0 = _mm512_xor_si512(vy0, _mm512_shuffle_epi8(vtableF, vx0));
    vy1 = _mm512_xor_si512(vy1, _mm512_shuffle_epi8(vtableF, vx1));

    _mm512_storeu_si512(y, vy0);
    _mm512_storeu_si512(y + 64, vy1);
    y += 128;
  }
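  // Same lookup scheme, one 64-byte vector at a time.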
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m512i vx = _mm512_loadu_si512(x);
    x += 64;

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

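    // Saturating subtraction from here on, as in the main loop.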
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_storeu_si512(y, vy);
    y += 64;
  }
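  // Tail of 1-63 bytes: build a mask of the low n bits and use a masked
  // load/store so that no out-of-bounds memory is touched.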
  if XNN_UNLIKELY(n != 0) {
    assert(n < 64);
    const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << n) - UINT64_C(1)));

    __m512i vx = _mm512_maskz_loadu_epi8(vmask, x);

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

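    // Saturating subtraction from here on, as in the main loop.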
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_mask_storeu_epi8(y, vmask, vy);
  }
}