// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-avx2-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>

void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__avx2_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

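  // Outer loop: one iteration per output pixel. Each iteration accumulates
  // all 25 kernel taps across every channel, requantizes, and stores the
  // resulting int8 outputs.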
  do {
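    // Gather the 25 input row pointers, one per kernel tap. A pointer equal
    // to `zero` points at the shared zero buffer used for padding, so it is
    // deliberately not adjusted by input_offset.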
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

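    // Main loop: process 8 channels per iteration. The packed weights for
    // each group of 8 channels are laid out as 8 int32 biases, then 25x8
    // int8 kernel values (tap-major), then 8 float per-channel scales, as
    // the offsets below imply.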
    size_t c = channels;
    const void* w = weights;
    for (; c >= 8; c -= 8) {
      __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);


      const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
      const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));
      i0 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));

      const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
      const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));
      i1 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));

      const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
      const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));
      i2 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));

      const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
      const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));
      i3 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));

      const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
      const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));
      i4 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));

      const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
      const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));
      i5 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));

      const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
      const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));
      i6 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));

      const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
      const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));
      i7 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));

      const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
      const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));
      i8 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));

      const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
      const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t))));
      i9 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));

      const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
      const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t))));
      i10 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));

      const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
      const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t))));
      i11 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));

      const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
      const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t))));
      i12 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));

      const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
      const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t))));
      i13 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));

      const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
      const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t))));
      i14 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));

      const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
      const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t))));
      i15 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));

      const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
      const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t))));
      i16 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));

      const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
      const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t))));
      i17 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));

      const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
      const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t))));
      i18 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));

      const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
      const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t))));
      i19 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));

      const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
      const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t))));
      i20 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));

      const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
      const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t))));
      i21 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));

      const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
      const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t))));
      i22 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));

      const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
      const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t))));
      i23 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));

      const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
      const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t))));
      i24 += 8;

      vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t));

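      // Requantize in fp32: convert the int32 accumulators to float, apply
      // the per-channel scales stored after the kernel weights, and clamp
      // from above before rounding back to int32.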
      __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);

      const __m256 vscale01234567 = _mm256_loadu_ps((const float*) w);
      w = (const void*) ((const float*) w + 8);
      vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);

      const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
      vscaled01234567 = _mm256_min_ps(vscaled01234567, voutput_max_less_zero_point);

      vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);

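      // Pack down to int8: saturating 32->16 pack, add the output zero
      // point, saturating 16->8 pack, then clamp from below with output_min.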
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
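    // Remainder: 1-7 channels left. The 8-byte loads may read past the valid
    // region (the kernel is declared XNN_OOB_READS), but only the valid
    // channels are stored below.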
    if XNN_UNLIKELY(c != 0) {
      {
        __m256i vacc01234567 = _mm256_loadu_si256((const __m256i*) w);


        const __m256i vi0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i0));
        const __m256i vk0x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi0x01234567, vk0x01234567));

        const __m256i vi1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i1));
        const __m256i vk1x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi1x01234567, vk1x01234567));

        const __m256i vi2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i2));
        const __m256i vk2x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi2x01234567, vk2x01234567));

        const __m256i vi3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i3));
        const __m256i vk3x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi3x01234567, vk3x01234567));

        const __m256i vi4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i4));
        const __m256i vk4x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi4x01234567, vk4x01234567));

        const __m256i vi5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i5));
        const __m256i vk5x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi5x01234567, vk5x01234567));

        const __m256i vi6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i6));
        const __m256i vk6x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi6x01234567, vk6x01234567));

        const __m256i vi7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i7));
        const __m256i vk7x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi7x01234567, vk7x01234567));

        const __m256i vi8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i8));
        const __m256i vk8x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi8x01234567, vk8x01234567));

        const __m256i vi9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i9));
        const __m256i vk9x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi9x01234567, vk9x01234567));

        const __m256i vi10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i10));
        const __m256i vk10x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi10x01234567, vk10x01234567));

        const __m256i vi11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i11));
        const __m256i vk11x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi11x01234567, vk11x01234567));

        const __m256i vi12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i12));
        const __m256i vk12x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi12x01234567, vk12x01234567));

        const __m256i vi13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i13));
        const __m256i vk13x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi13x01234567, vk13x01234567));

        const __m256i vi14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i14));
        const __m256i vk14x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi14x01234567, vk14x01234567));

        const __m256i vi15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i15));
        const __m256i vk15x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi15x01234567, vk15x01234567));

        const __m256i vi16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i16));
        const __m256i vk16x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi16x01234567, vk16x01234567));

        const __m256i vi17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i17));
        const __m256i vk17x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi17x01234567, vk17x01234567));

        const __m256i vi18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i18));
        const __m256i vk18x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi18x01234567, vk18x01234567));

        const __m256i vi19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i19));
        const __m256i vk19x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi19x01234567, vk19x01234567));

        const __m256i vi20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i20));
        const __m256i vk20x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi20x01234567, vk20x01234567));

        const __m256i vi21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i21));
        const __m256i vk21x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi21x01234567, vk21x01234567));

        const __m256i vi22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i22));
        const __m256i vk22x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi22x01234567, vk22x01234567));

        const __m256i vi23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i23));
        const __m256i vk23x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi23x01234567, vk23x01234567));

        const __m256i vi24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) i24));
        const __m256i vk24x01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t))));

        vacc01234567 = _mm256_add_epi32(vacc01234567, _mm256_mullo_epi32(vi24x01234567, vk24x01234567));


        __m256 vscaled01234567 = _mm256_cvtepi32_ps(vacc01234567);
        const __m256 vscale01234567 = _mm256_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t)));
        vscaled01234567 = _mm256_mul_ps(vscaled01234567, vscale01234567);
        vscaled01234567 = _mm256_min_ps(vscaled01234567, _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point));
        vacc01234567 = _mm256_cvtps_epi32(vscaled01234567);


        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_avx2.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(_mm256_castsi256_si128(vacc01234567), _mm256_extracti128_si256(vacc01234567, 1)), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx2.output_min);
        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

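        // Store the remaining 1-7 bytes in 4-, 2-, and 1-byte chunks,
        // shifting the consumed lanes out of the vector after each store.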
487 if (c & 4) {
488 unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
489 vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
490 output += 4;
491 }
492 if (c & 2) {
493 unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
494 vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
495 output += 2;
496 }
497 if (c & 1) {
498 *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
499 output += 1;
500 }
501 }
502 }
503
504 output = (int8_t*) ((uintptr_t) output + output_increment);
505 } while (--output_width != 0);
506 }
507