// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>

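// Applies the 256-entry byte lookup table t to n bytes of x, writing the
// results to y. The table is handled 16 bytes per row with VPSHUFB, and the
// partial per-row lookups are combined with XOR via the folded vtable*
// registers set up below.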
void xnn_x8_lut_ukernel__avx2_x64(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

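  // Load the 256-byte table as 16 rows of 16 bytes, broadcasting each row
  // into both 128-bit lanes of a 256-bit register.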
  const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) t));
  const __m256i vt1 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 16)));
  const __m256i vt2 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 32)));
  const __m256i vt3 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 48)));
  const __m256i vt4 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 64)));
  const __m256i vt5 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 80)));
  const __m256i vt6 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 96)));
  const __m256i vt7 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 112)));
  const __m256i vt8 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 128)));
  const __m256i vt9 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 144)));
  const __m256i vtA = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 160)));
  const __m256i vtB = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 176)));
  const __m256i vtC = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 192)));
  const __m256i vtD = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 208)));
  const __m256i vtE = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 224)));
  const __m256i vtF = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + 240)));

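  // XOR-fold adjacent rows: XORing a vtableK lookup into the result
  // accumulated from the previous rows reconstructs row K. Rows 8-15 also
  // fold in rows 0-7, which cancels the contributions picked up while the
  // wrapping index subtraction passes back through the 0..127 range.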
  const __m256i vtable0 = vt0;
  const __m256i vtable1 = _mm256_xor_si256(vt0, vt1);
  const __m256i vtable2 = _mm256_xor_si256(vt1, vt2);
  const __m256i vtable3 = _mm256_xor_si256(vt2, vt3);
  const __m256i vtable4 = _mm256_xor_si256(vt3, vt4);
  const __m256i vtable5 = _mm256_xor_si256(vt4, vt5);
  const __m256i vtable6 = _mm256_xor_si256(vt5, vt6);
  const __m256i vtable7 = _mm256_xor_si256(vt6, vt7);
  const __m256i vtable8 = _mm256_xor_si256(_mm256_xor_si256(vt7, vt8), vtable0);
  const __m256i vtable9 = _mm256_xor_si256(_mm256_xor_si256(vt8, vt9), vtable1);
  const __m256i vtableA = _mm256_xor_si256(_mm256_xor_si256(vt9, vtA), vtable2);
  const __m256i vtableB = _mm256_xor_si256(_mm256_xor_si256(vtA, vtB), vtable3);
  const __m256i vtableC = _mm256_xor_si256(_mm256_xor_si256(vtB, vtC), vtable4);
  const __m256i vtableD = _mm256_xor_si256(_mm256_xor_si256(vtC, vtD), vtable5);
  const __m256i vtableE = _mm256_xor_si256(_mm256_xor_si256(vtD, vtE), vtable6);
  const __m256i vtableF = _mm256_xor_si256(_mm256_xor_si256(vtE, vtF), vtable7);

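  // VPSHUFB zeroes any byte whose index has its top bit set; by shifting the
  // indices down by this offset before each lookup stage, only bytes whose
  // index currently falls in 0..15 pick up entries from that stage's row.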
  const __m256i voffset = _mm256_set1_epi8(16);
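  // Main loop: 64 input bytes per iteration, processed as two 32-byte vectors.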
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m256i vx0 = _mm256_loadu_si256((const __m256i*) x);
    __m256i vx1 = _mm256_loadu_si256((const __m256i*) (x + 32));
    x += 64;

    __m256i vy0 = _mm256_shuffle_epi8(vtable0, vx0);
    __m256i vy1 = _mm256_shuffle_epi8(vtable0, vx1);

    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable1, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable1, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable2, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable2, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable3, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable3, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable4, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable4, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable5, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable5, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable6, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable6, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable7, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable7, vx1));
    vx0 = _mm256_sub_epi8(vx0, voffset);
    vx1 = _mm256_sub_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable8, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable8, vx1));

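    // From row 9 onward the subtraction saturates, so indices that already
    // fell below the active window pin at -128 (top bit set) and keep
    // producing zeros from VPSHUFB instead of wrapping back into range.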
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtable9, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtable9, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableA, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableA, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableB, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableB, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableC, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableC, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableD, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableD, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableE, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableE, vx1));
    vx0 = _mm256_subs_epi8(vx0, voffset);
    vx1 = _mm256_subs_epi8(vx1, voffset);
    vy0 = _mm256_xor_si256(vy0, _mm256_shuffle_epi8(vtableF, vx0));
    vy1 = _mm256_xor_si256(vy1, _mm256_shuffle_epi8(vtableF, vx1));

    _mm256_storeu_si256((__m256i*) y, vy0);
    _mm256_storeu_si256((__m256i*) (y + 32), vy1);
    y += 64;
  }
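  // Tail loop: 16 bytes per iteration, using only the low 128-bit lane of
  // the broadcast tables.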
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
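  // Final 1..15 bytes: a full 16-byte vector is loaded and looked up, then
  // only the low n bytes of the result are stored.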
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable1), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable2), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable3), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable4), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable5), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable6), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable7), vx));
    vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable8), vx));

    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable9), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableA), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableB), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableC), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableD), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableE), vx));
    vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtableF), vx));

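    // Store the remaining n bytes in chunks of 8, 4, 2, and 1, shifting the
    // already-stored bytes out of vy after each partial store.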
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}