// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>


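// Depthwise convolution microkernel: 9 filter taps, up to 16 channels per
// iteration ("up16x9"), FP32 per-channel requantization, AVX2 32-bit
// multiplies. Packed weights hold, per 16-channel group: 16 int32 biases,
// 9x16 int8 kernel taps, then 16 float requantization scales.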
void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x9__avx2_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
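    // Gather the 9 input row pointers for this output pixel. Rows that point
    // at the shared zero buffer (implicit padding) must not have the input
    // offset applied.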
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
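    // Main loop: process 16 channels per iteration.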
    for (; c >= 16; c -= 16) {
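      // Initialize both 8-lane accumulators from the per-channel int32 biases
      // at the head of the packed weight group.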
      __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);
      __m256i vacc89ABCDEF = _mm256_loadu_si256((const __m256i*) ((const int32_t*) w + 8));


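      // For each of the 9 taps: sign-extend 16 input bytes and 16 kernel bytes
      // to 32-bit lanes, multiply, and accumulate.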
      const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
      const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t))));
      const __m256i vi0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
      const __m256i vk0x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t))));
      i0 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi0x89ABCDEF, vk0x89ABCDEF));

      const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
      const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t))));
      const __m256i vi1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
      const __m256i vk1x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t))));
      i1 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi1x89ABCDEF, vk1x89ABCDEF));

      const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
      const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t))));
      const __m256i vi2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
      const __m256i vk2x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t))));
      i2 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi2x89ABCDEF, vk2x89ABCDEF));

      const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
      const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t))));
      const __m256i vi3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
      const __m256i vk3x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t))));
      i3 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi3x89ABCDEF, vk3x89ABCDEF));

      const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
      const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t))));
      const __m256i vi4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
      const __m256i vk4x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t))));
      i4 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi4x89ABCDEF, vk4x89ABCDEF));

      const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
      const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t))));
      const __m256i vi5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
      const __m256i vk5x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t))));
      i5 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi5x89ABCDEF, vk5x89ABCDEF));

      const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
      const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t))));
      const __m256i vi6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
      const __m256i vk6x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t))));
      i6 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi6x89ABCDEF, vk6x89ABCDEF));

      const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
      const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t))));
      const __m256i vi7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i7 + 8)));
      const __m256i vk7x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t))));
      i7 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi7x89ABCDEF, vk7x89ABCDEF));

      const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
      const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t))));
      const __m256i vi8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (i8 + 8)));
      const __m256i vk8x89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t))));
      i8 += 16;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));
      vacc89ABCDEF = _mm256_add_epi32(vacc89ABCDEF, _mm256_mullo_epi32(vi8x89ABCDEF, vk8x89ABCDEF));

      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));

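      // Requantize: convert to float, multiply by the per-channel scales that
      // follow the kernel bytes in the packed weights, clamp to the output
      // maximum, and round back to int32.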
      __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
      __m256 vscaled89ABCDEF = _mm256_cvtepi32_ps(vacc89ABCDEF);

      const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
      const __m256 vscale89ABCDEF = _mm256_loadu_ps((const float*) w + 8);
      w = (const void*) ((const float*) w + 16);
      vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
      vscaled89ABCDEF = _mm256_mul_ps(vscaled89ABCDEF, vscale89ABCDEF);

      const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
      vscaled01234567 = _mm256_min_ps(vscaled01234567, voutput_max_less_zero_point);
      vscaled89ABCDEF = _mm256_min_ps(vscaled89ABCDEF, voutput_max_less_zero_point);

      vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);
      vacc89ABCDEF = _mm256_cvtps_epi32(vscaled89ABCDEF);

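      // Pack int32 -> int16 with signed saturation and add the output zero
      // point, then pack down to int8; the shuffle undoes the lane
      // interleaving of the 256-bit pack, and the final max applies the
      // output minimum.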
      const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
      __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(vacc01234567, vacc89ABCDEF), voutput_zero_point);

      __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packs_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
      vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);

      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
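    // Remainder: handle the last 1-15 channels in groups of up to 8; k walks
    // the int8 kernel bytes directly.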
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);


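        // Same 9-tap multiply-accumulate on 8 channels; taps remain 16 bytes
        // apart because the weights are packed for 16-channel groups.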
        const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
        const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) k));
        i0 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));

        const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
        const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 16)));
        i1 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));

        const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
        const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 32)));
        i2 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));

        const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
        const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 48)));
        i3 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));

        const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
        const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 64)));
        i4 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));

        const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
        const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 80)));
        i5 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));

        const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
        const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 96)));
        i6 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));

        const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
        const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 112)));
        i7 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));

        const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
        const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (k + 128)));
        i8 += 8;

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));

        k += 8;

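        // Requantize this 8-channel group; its float scales sit past the
        // group's 16 int32 biases and 144 kernel bytes.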
        __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
        const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t)));
        vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
        vscaled01234567 = _mm256_min_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point));
        vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);

        w = (const void*) ((const int32_t*) w + 8);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

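        // Store 8 outputs when a full group remains; otherwise write the
        // 1-7 channel tail in 4-, 2-, and 1-byte pieces.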
        if XNN_LIKELY(c >= 8) {
          _mm_storel_epi64((__m128i*) output, vout0123456701234567);
          output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
            vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
            output += 4;
          }
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
            vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}