// Auto-generated file. Do not edit!
//   Template: src/x8-lut/avx512skx-vpshufb.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


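// Applies a 256-entry byte lookup table to n bytes; the scalar equivalent is
//   for (size_t i = 0; i < n; i++) y[i] = t[x[i]];
// AVX512 has no byte gather, so the kernel emulates the lookup with VPSHUFB
// (_mm512_shuffle_epi8), which can only index a 16-byte table within each
// 128-bit lane: the 256-byte table is split into 16 chunks whose results are
// combined with the XOR-accumulation scheme described below.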
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x64(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

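  // Load the 256-byte table as sixteen 16-byte chunks, broadcasting each chunk
  // to all four 128-bit lanes so VPSHUFB sees the same table in every lane.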
  const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) t));
  const __m512i vt1 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 16)));
  const __m512i vt2 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 32)));
  const __m512i vt3 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 48)));
  const __m512i vt4 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 64)));
  const __m512i vt5 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 80)));
  const __m512i vt6 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 96)));
  const __m512i vt7 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 112)));
  const __m512i vt8 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 128)));
  const __m512i vt9 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 144)));
  const __m512i vtA = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 160)));
  const __m512i vtB = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 176)));
  const __m512i vtC = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 192)));
  const __m512i vtD = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 208)));
  const __m512i vtE = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 224)));
  const __m512i vtF = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + 240)));

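  // VPSHUFB zeroes an output byte whenever bit 7 of its index byte is set, and
  // otherwise selects table[index % 16] within each 128-bit lane. Instead of
  // blending 16 candidate results, the kernel shuffles XOR-differences of
  // consecutive chunks and XOR-accumulates: for x in chunk k, the shuffles of
  // vtable0..vtablek telescope to t[x]. The upper eight tables additionally
  // fold in vtable[k-8], so the accumulation also telescopes for x >= 128,
  // where the first eight shuffles contribute zero.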
  const __m512i vtable0 = vt0;
  const __m512i vtable1 = _mm512_xor_si512(vt0, vt1);
  const __m512i vtable2 = _mm512_xor_si512(vt1, vt2);
  const __m512i vtable3 = _mm512_xor_si512(vt2, vt3);
  const __m512i vtable4 = _mm512_xor_si512(vt3, vt4);
  const __m512i vtable5 = _mm512_xor_si512(vt4, vt5);
  const __m512i vtable6 = _mm512_xor_si512(vt5, vt6);
  const __m512i vtable7 = _mm512_xor_si512(vt6, vt7);
  const __m512i vtable8 = _mm512_xor_si512(_mm512_xor_si512(vt7, vt8), vtable0);
  const __m512i vtable9 = _mm512_xor_si512(_mm512_xor_si512(vt8, vt9), vtable1);
  const __m512i vtableA = _mm512_xor_si512(_mm512_xor_si512(vt9, vtA), vtable2);
  const __m512i vtableB = _mm512_xor_si512(_mm512_xor_si512(vtA, vtB), vtable3);
  const __m512i vtableC = _mm512_xor_si512(_mm512_xor_si512(vtB, vtC), vtable4);
  const __m512i vtableD = _mm512_xor_si512(_mm512_xor_si512(vtC, vtD), vtable5);
  const __m512i vtableE = _mm512_xor_si512(_mm512_xor_si512(vtD, vtE), vtable6);
  const __m512i vtableF = _mm512_xor_si512(_mm512_xor_si512(vtE, vtF), vtable7);

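  // Subtracting this offset between VPSHUFB steps renumbers the next 16-entry
  // chunk's indices to 0..15, while already-consumed indices go negative
  // (bit 7 set) and shuffle to zero.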
  const __m512i voffset = _mm512_set1_epi8(16);
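  // Main loop: translate 64 input bytes per iteration.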
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m512i vx = _mm512_loadu_si512(x);
    x += 64;

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

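    // From step 9 on, switch to saturating subtraction: a plain subtraction
    // could wrap an exhausted (negative) index past -128 back to a positive
    // value with bit 7 clear, producing a spurious table hit. Saturation
    // clamps at -128, keeping bit 7 set so the shuffle keeps returning zero.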
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_storeu_si512(y, vy);
    y += 64;
  }
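  // Remainder: handle the final 1..63 bytes with a mask of the n low bits.
  // Lanes beyond n are zero-filled by the masked load; they compute t[0] in
  // the shuffles below, but the masked store discards them.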
  if XNN_UNLIKELY(n != 0) {
    assert(n < 64);
    const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << n) - UINT64_C(1)));

    __m512i vx = _mm512_maskz_loadu_epi8(vmask, x);

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable1, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable2, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable3, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable4, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable5, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable6, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable7, vx));
    vx = _mm512_sub_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable8, vx));

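    // Saturating subtraction from step 9 on, as in the main loop above.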
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable9, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableA, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableB, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableC, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableD, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableE, vx));
    vx = _mm512_subs_epi8(vx, voffset);
    vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtableF, vx));

    _mm512_mask_storeu_epi8(y, vmask, vy);
  }
}