// Auto-generated file. Do not edit!
//   Template: src/x8-lut/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>


void xnn_x8_lut_ukernel__avx_x48(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

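  // Load the 256-entry lookup table as sixteen 16-byte sub-tables.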
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  const __m128i vt1 = _mm_load_si128((const __m128i*) (t + 16));
  const __m128i vt2 = _mm_load_si128((const __m128i*) (t + 32));
  const __m128i vt3 = _mm_load_si128((const __m128i*) (t + 48));
  const __m128i vt4 = _mm_load_si128((const __m128i*) (t + 64));
  const __m128i vt5 = _mm_load_si128((const __m128i*) (t + 80));
  const __m128i vt6 = _mm_load_si128((const __m128i*) (t + 96));
  const __m128i vt7 = _mm_load_si128((const __m128i*) (t + 112));
  const __m128i vt8 = _mm_load_si128((const __m128i*) (t + 128));
  const __m128i vt9 = _mm_load_si128((const __m128i*) (t + 144));
  const __m128i vtA = _mm_load_si128((const __m128i*) (t + 160));
  const __m128i vtB = _mm_load_si128((const __m128i*) (t + 176));
  const __m128i vtC = _mm_load_si128((const __m128i*) (t + 192));
  const __m128i vtD = _mm_load_si128((const __m128i*) (t + 208));
  const __m128i vtE = _mm_load_si128((const __m128i*) (t + 224));
  const __m128i vtF = _mm_load_si128((const __m128i*) (t + 240));

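  // Combine the sub-tables into XOR differences of adjacent 16-byte chunks.
  // PSHUFB returns zero for index bytes with the sign bit set, so XOR-accumulating
  // one lookup per sub-table while the index is shifted down by 16 each stage
  // reconstructs the full 256-entry table one 16-entry range at a time. The upper
  // eight tables additionally fold in vtable0..vtable7 to cancel the stale
  // contributions picked up once the wrapping index subtraction re-enters the
  // 0..127 range.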
  const __m128i vtable0 = vt0;
  const __m128i vtable1 = _mm_xor_si128(vt0, vt1);
  const __m128i vtable2 = _mm_xor_si128(vt1, vt2);
  const __m128i vtable3 = _mm_xor_si128(vt2, vt3);
  const __m128i vtable4 = _mm_xor_si128(vt3, vt4);
  const __m128i vtable5 = _mm_xor_si128(vt4, vt5);
  const __m128i vtable6 = _mm_xor_si128(vt5, vt6);
  const __m128i vtable7 = _mm_xor_si128(vt6, vt7);
  const __m128i vtable8 = _mm_xor_si128(_mm_xor_si128(vt7, vt8), vtable0);
  const __m128i vtable9 = _mm_xor_si128(_mm_xor_si128(vt8, vt9), vtable1);
  const __m128i vtableA = _mm_xor_si128(_mm_xor_si128(vt9, vtA), vtable2);
  const __m128i vtableB = _mm_xor_si128(_mm_xor_si128(vtA, vtB), vtable3);
  const __m128i vtableC = _mm_xor_si128(_mm_xor_si128(vtB, vtC), vtable4);
  const __m128i vtableD = _mm_xor_si128(_mm_xor_si128(vtC, vtD), vtable5);
  const __m128i vtableE = _mm_xor_si128(_mm_xor_si128(vtD, vtE), vtable6);
  const __m128i vtableF = _mm_xor_si128(_mm_xor_si128(vtE, vtF), vtable7);

  const __m128i voffset = _mm_set1_epi8(16);
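  // Main loop: process 48 input bytes (three 16-byte vectors) per iteration.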
  for (; n >= 48 * sizeof(uint8_t); n -= 48 * sizeof(uint8_t)) {
    __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    __m128i vx2 = _mm_loadu_si128((const __m128i*) (x + 32));
    x += 48;

    __m128i vy0 = _mm_shuffle_epi8(vtable0, vx0);
    __m128i vy1 = _mm_shuffle_epi8(vtable0, vx1);
    __m128i vy2 = _mm_shuffle_epi8(vtable0, vx2);

    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable1, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable1, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable1, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable2, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable2, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable2, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable3, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable3, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable3, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable4, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable4, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable4, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable5, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable5, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable5, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable6, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable6, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable6, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable7, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable7, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable7, vx2));
    vx0 = _mm_sub_epi8(vx0, voffset);
    vx1 = _mm_sub_epi8(vx1, voffset);
    vx2 = _mm_sub_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable8, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable8, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable8, vx2));

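    // Switch to saturating subtraction so that once an index byte reaches -128
    // it stays negative and subsequent PSHUFB lookups keep returning zero.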
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtable9, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtable9, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtable9, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableA, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableA, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableA, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableB, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableB, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableB, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableC, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableC, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableC, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableD, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableD, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableD, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableE, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableE, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableE, vx2));
    vx0 = _mm_subs_epi8(vx0, voffset);
    vx1 = _mm_subs_epi8(vx1, voffset);
    vx2 = _mm_subs_epi8(vx2, voffset);
    vy0 = _mm_xor_si128(vy0, _mm_shuffle_epi8(vtableF, vx0));
    vy1 = _mm_xor_si128(vy1, _mm_shuffle_epi8(vtableF, vx1));
    vy2 = _mm_xor_si128(vy2, _mm_shuffle_epi8(vtableF, vx2));

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    _mm_storeu_si128((__m128i*) (y + 32), vy2);
    y += 48;
  }
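  // Handle any remaining full 16-byte vectors one at a time.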
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
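  // Handle the final partial vector of 1-15 bytes.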
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable1, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable2, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable3, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable4, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable5, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable6, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable7, vx));
    vx = _mm_sub_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable8, vx));

    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable9, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableA, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableB, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableC, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableD, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableE, vx));
    vx = _mm_subs_epi8(vx, voffset);
    vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtableF, vx));

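    // Store the computed bytes in 8/4/2/1-byte pieces, shifting the remaining
    // lanes down after each partial store.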
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}