// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>


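// QU8 depthwise convolution microkernel with min/max output clamping:
// 25 taps (typically a 5x5 kernel), single pass ("unipass"), 8 channels per
// loop iteration ("up8x25"), fp32 requantization, AVX2 32-bit multiplies
// ("mul32").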
void xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

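  // Load 8 copies of the kernel zero point, widened from uint16 to int32;
  // it is subtracted from every kernel byte before the multiply.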
  const __m256i vk_zero_point = _mm256_cvtepu16_epi32(_mm_load_si128((const __m128i*) params->fp32_avx2.kernel_zero_point));
  do {
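    // Set up the 25 input row pointers for this output pixel. Rows that
    // point at the shared zero buffer (padding) are left as-is; all others
    // are adjusted by input_offset.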
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    const uint8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
    }
    const uint8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
    }
    const uint8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
    }
    const uint8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
    }
    const uint8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
    }
    const uint8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
    }
    const uint8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
    }
    const uint8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
    }
    const uint8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
    }
    const uint8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
    }
    const uint8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
    }
    const uint8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
    }
    const uint8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
    }
    const uint8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
    }
    const uint8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
    }
    const uint8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
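    // Main loop: process 8 channels per iteration. The packed weights start
    // with 8 int32 bias values, followed by 25 groups of 8 uint8 kernel bytes.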
    for (; c >= 8; c -= 8) {
      __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);


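      // For each of the 25 taps: widen 8 input bytes and 8 kernel bytes to
      // int32, subtract the kernel zero point from the kernel bytes, multiply
      // element-wise, and accumulate; the row pointers advance by 8 bytes.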
      const __m256i vi0x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i0));
      const __m256i vk0x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);
      i0 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));

      const __m256i vi1x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i1));
      const __m256i vk1x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)))), vk_zero_point);
      i1 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));

      const __m256i vi2x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i2));
      const __m256i vk2x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);
      i2 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));

      const __m256i vi3x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i3));
      const __m256i vk3x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)))), vk_zero_point);
      i3 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));

      const __m256i vi4x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i4));
      const __m256i vk4x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);
      i4 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));

      const __m256i vi5x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i5));
      const __m256i vk5x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)))), vk_zero_point);
      i5 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));

      const __m256i vi6x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i6));
      const __m256i vk6x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);
      i6 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));

      const __m256i vi7x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i7));
      const __m256i vk7x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)))), vk_zero_point);
      i7 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));

      const __m256i vi8x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i8));
      const __m256i vk8x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);
      i8 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));

      const __m256i vi9x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i9));
      const __m256i vk9x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t)))), vk_zero_point);
      i9 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));

      const __m256i vi10x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i10));
      const __m256i vk10x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);
      i10 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));

      const __m256i vi11x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i11));
      const __m256i vk11x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(uint8_t)))), vk_zero_point);
      i11 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));

      const __m256i vi12x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i12));
      const __m256i vk12x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);
      i12 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));

      const __m256i vi13x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i13));
      const __m256i vk13x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(uint8_t)))), vk_zero_point);
      i13 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));

      const __m256i vi14x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i14));
      const __m256i vk14x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);
      i14 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));

      const __m256i vi15x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i15));
      const __m256i vk15x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(uint8_t)))), vk_zero_point);
      i15 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));

      const __m256i vi16x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i16));
      const __m256i vk16x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);
      i16 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));

      const __m256i vi17x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i17));
      const __m256i vk17x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(uint8_t)))), vk_zero_point);
      i17 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));

      const __m256i vi18x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i18));
      const __m256i vk18x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);
      i18 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));

      const __m256i vi19x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i19));
      const __m256i vk19x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(uint8_t)))), vk_zero_point);
      i19 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));

      const __m256i vi20x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i20));
      const __m256i vk20x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);
      i20 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));

      const __m256i vi21x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i21));
      const __m256i vk21x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(uint8_t)))), vk_zero_point);
      i21 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));

      const __m256i vi22x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i22));
      const __m256i vk22x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);
      i22 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));

      const __m256i vi23x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i23));
      const __m256i vk23x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(uint8_t)))), vk_zero_point);
      i23 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));

      const __m256i vi24x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i24));
      const __m256i vk24x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);
      i24 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(uint8_t));

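      // Requantize: convert the accumulators to float, multiply by the fp32
      // scale, clamp to the output max (expressed relative to the output zero
      // point), and round back to int32.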
      __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);

      const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
      vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale);

      const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
      vscaled01234567 = _mm256_min_ps(vscaled01234567, voutput_max_less_zero_point);

      vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);

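      // Pack int32 -> int16 with signed saturation, add the output zero point
      // (saturating), pack int16 -> uint8 with unsigned saturation, clamp to
      // the output min, and store 8 output bytes.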
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
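    // Remainder: 1-7 channels left. The 8-byte loads may read past the valid
    // data (permitted per the XNN_OOB_READS annotation on this function);
    // only the low c output bytes are stored.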
    if XNN_UNLIKELY(c != 0) {
      {
        __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);


        const __m256i vi0x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i0));
        const __m256i vk0x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));

        const __m256i vi1x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i1));
        const __m256i vk1x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));

        const __m256i vi2x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i2));
        const __m256i vk2x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));

        const __m256i vi3x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i3));
        const __m256i vk3x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));

        const __m256i vi4x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i4));
        const __m256i vk4x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));

        const __m256i vi5x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i5));
        const __m256i vk5x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));

        const __m256i vi6x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i6));
        const __m256i vk6x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));

        const __m256i vi7x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i7));
        const __m256i vk7x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));

        const __m256i vi8x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i8));
        const __m256i vk8x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));

        const __m256i vi9x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i9));
        const __m256i vk9x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));

        const __m256i vi10x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i10));
        const __m256i vk10x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));

        const __m256i vi11x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i11));
        const __m256i vk11x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));

        const __m256i vi12x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i12));
        const __m256i vk12x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));

        const __m256i vi13x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i13));
        const __m256i vk13x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));

        const __m256i vi14x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i14));
        const __m256i vk14x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));

        const __m256i vi15x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i15));
        const __m256i vk15x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));

        const __m256i vi16x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i16));
        const __m256i vk16x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));

        const __m256i vi17x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i17));
        const __m256i vk17x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));

        const __m256i vi18x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i18));
        const __m256i vk18x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));

        const __m256i vi19x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i19));
        const __m256i vk19x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));

        const __m256i vi20x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i20));
        const __m256i vk20x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));

        const __m256i vi21x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i21));
        const __m256i vk21x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));

        const __m256i vi22x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i22));
        const __m256i vk22x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));

        const __m256i vi23x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i23));
        const __m256i vk23x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));

        const __m256i vi24x01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) i24));
        const __m256i vk24x01234567 = _mm256_sub_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(uint8_t)))), vk_zero_point);

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));


        __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
        vscaled01234567 = _mm256_mul_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.scale));
        vscaled01234567 = _mm256_min_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point));
        vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);


        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
        vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

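        // Store the low c bytes in 4-, 2-, and 1-byte pieces, shifting the
        // stored bytes out of the vector after each partial store.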
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}