1 // Auto-generated file. Do not edit!
2 //   Template: src/qs8-dwconv/unipass-sse-mul16.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2020 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 
12 #if defined(__GNUC__) || defined(__clang__)
13   #include <x86intrin.h>
14 #else
15   #include <immintrin.h>
16   #include <ammintrin.h>
17 #endif
18 
19 #include <xnnpack/dwconv.h>
20 #include <xnnpack/unaligned.h>
21 
22 
23 void xnn_qc8_dwconv_minmax_fp32_ukernel_up16x25__xop_mul16_add16(
24     size_t channels,
25     size_t output_width,
26     const int8_t** input,
27     const void* weights,
28     int8_t* output,
29     size_t input_stride,
30     size_t output_increment,
31     size_t input_offset,
32     const int8_t* zero,
33     const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
34 {
35   assert(channels != 0);
36   assert(output_width != 0);
37 
38   do {
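    /*
     * Per output pixel: gather the 25 input-row pointers (one per kernel tap,
     * e.g. a 5x5 window). Pointers that equal `zero` reference the padding
     * buffer and are deliberately left without the input_offset adjustment.
     */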
39     const int8_t* i0 = input[0];
40     assert(i0 != NULL);
41     if XNN_UNPREDICTABLE(i0 != zero) {
42       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
43     }
44     const int8_t* i1 = input[1];
45     assert(i1 != NULL);
46     if XNN_UNPREDICTABLE(i1 != zero) {
47       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
48     }
49     const int8_t* i2 = input[2];
50     assert(i2 != NULL);
51     if XNN_UNPREDICTABLE(i2 != zero) {
52       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
53     }
54     const int8_t* i3 = input[3];
55     assert(i3 != NULL);
56     if XNN_UNPREDICTABLE(i3 != zero) {
57       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
58     }
59     const int8_t* i4 = input[4];
60     assert(i4 != NULL);
61     if XNN_UNPREDICTABLE(i4 != zero) {
62       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
63     }
64     const int8_t* i5 = input[5];
65     assert(i5 != NULL);
66     if XNN_UNPREDICTABLE(i5 != zero) {
67       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
68     }
69     const int8_t* i6 = input[6];
70     assert(i6 != NULL);
71     if XNN_UNPREDICTABLE(i6 != zero) {
72       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
73     }
74     const int8_t* i7 = input[7];
75     assert(i7 != NULL);
76     if XNN_UNPREDICTABLE(i7 != zero) {
77       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
78     }
79     const int8_t* i8 = input[8];
80     assert(i8 != NULL);
81     if XNN_UNPREDICTABLE(i8 != zero) {
82       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
83     }
84     const int8_t* i9 = input[9];
85     assert(i9 != NULL);
86     if XNN_UNPREDICTABLE(i9 != zero) {
87       i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
88     }
89     const int8_t* i10 = input[10];
90     assert(i10 != NULL);
91     if XNN_UNPREDICTABLE(i10 != zero) {
92       i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
93     }
94     const int8_t* i11 = input[11];
95     assert(i11 != NULL);
96     if XNN_UNPREDICTABLE(i11 != zero) {
97       i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
98     }
99     const int8_t* i12 = input[12];
100     assert(i12 != NULL);
101     if XNN_UNPREDICTABLE(i12 != zero) {
102       i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
103     }
104     const int8_t* i13 = input[13];
105     assert(i13 != NULL);
106     if XNN_UNPREDICTABLE(i13 != zero) {
107       i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
108     }
109     const int8_t* i14 = input[14];
110     assert(i14 != NULL);
111     if XNN_UNPREDICTABLE(i14 != zero) {
112       i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
113     }
114     const int8_t* i15 = input[15];
115     assert(i15 != NULL);
116     if XNN_UNPREDICTABLE(i15 != zero) {
117       i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
118     }
119     const int8_t* i16 = input[16];
120     assert(i16 != NULL);
121     if XNN_UNPREDICTABLE(i16 != zero) {
122       i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
123     }
124     const int8_t* i17 = input[17];
125     assert(i17 != NULL);
126     if XNN_UNPREDICTABLE(i17 != zero) {
127       i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
128     }
129     const int8_t* i18 = input[18];
130     assert(i18 != NULL);
131     if XNN_UNPREDICTABLE(i18 != zero) {
132       i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
133     }
134     const int8_t* i19 = input[19];
135     assert(i19 != NULL);
136     if XNN_UNPREDICTABLE(i19 != zero) {
137       i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
138     }
139     const int8_t* i20 = input[20];
140     assert(i20 != NULL);
141     if XNN_UNPREDICTABLE(i20 != zero) {
142       i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
143     }
144     const int8_t* i21 = input[21];
145     assert(i21 != NULL);
146     if XNN_UNPREDICTABLE(i21 != zero) {
147       i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
148     }
149     const int8_t* i22 = input[22];
150     assert(i22 != NULL);
151     if XNN_UNPREDICTABLE(i22 != zero) {
152       i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
153     }
154     const int8_t* i23 = input[23];
155     assert(i23 != NULL);
156     if XNN_UNPREDICTABLE(i23 != zero) {
157       i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
158     }
159     const int8_t* i24 = input[24];
160     assert(i24 != NULL);
161     if XNN_UNPREDICTABLE(i24 != zero) {
162       i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
163     }
164     input = (const int8_t**) ((uintptr_t) input + input_stride);
165 
166     size_t c = channels;
167     const void* w = weights;
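    /*
     * Packed weight layout consumed below, per group of 16 channels:
     * 16 int32 bias values, then 25 taps x 16 int8 kernel values (400 bytes),
     * then 16 float per-channel scales (qc8 = per-channel quantization).
     */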
168     for (; c >= 16; c -= 16) {
169       __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
170       __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
171       __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
172       __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
173 
174 
175       const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
176       const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
177       const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
178       const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
179       const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
180       const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
181       const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
182       const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
183       i0 += 16;
184 
185 
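      /*
       * mul16-add16 scheme: taps are processed in pairs. The even tap starts a
       * fresh 16-bit product (_mm_mullo_epi16); the odd tap is fused in with
       * the XOP multiply-accumulate _mm_macc_epi16. Only then is the 16-bit
       * pair-sum sign-extended to 32 bits and added to the accumulators,
       * halving the number of widening steps. This relies on the pair-sum
       * fitting in int16, which holds when weights avoid -128 (as in
       * TFLite-style per-channel int8 quantization).
       */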
186       __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
187       __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
188 
189 
190       const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
191       const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
192       const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
193       const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
194       const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
195       const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
196       const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
197       const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
198       i1 += 16;
199 
200 
201       vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);
202       vprod89ABCDEF = _mm_macc_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vprod89ABCDEF);
203 
204       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
205       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
206       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
207       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
208 
209       const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
210       const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
211       const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
212       const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
213       const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
214       const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
215       const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
216       const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
217       i2 += 16;
218 
219 
220       vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
221       vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
222 
223 
224       const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
225       const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
226       const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
227       const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
228       const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
229       const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
230       const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
231       const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
232       i3 += 16;
233 
234 
235       vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);
236       vprod89ABCDEF = _mm_macc_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vprod89ABCDEF);
237 
238       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
239       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
240       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
241       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
242 
243       const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
244       const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
245       const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
246       const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
247       const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
248       const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
249       const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
250       const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
251       i4 += 16;
252 
253 
254       vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
255       vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
256 
257 
258       const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
259       const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
260       const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
261       const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
262       const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
263       const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
264       const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
265       const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
266       i5 += 16;
267 
268 
269       vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);
270       vprod89ABCDEF = _mm_macc_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vprod89ABCDEF);
271 
272       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
273       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
274       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
275       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
276 
277       const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
278       const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
279       const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
280       const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
281       const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
282       const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
283       const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
284       const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
285       i6 += 16;
286 
287 
288       vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
289       vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
290 
291 
292       const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
293       const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
294       const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
295       const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
296       const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
297       const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
298       const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
299       const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
300       i7 += 16;
301 
302 
303       vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);
304       vprod89ABCDEF = _mm_macc_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vprod89ABCDEF);
305 
306       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
307       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
308       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
309       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
310 
311       const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
312       const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
313       const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
314       const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
315       const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
316       const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
317       const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
318       const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
319       i8 += 16;
320 
321 
322       vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
323       vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
324 
325 
326       const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
327       const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
328       const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t)));
329       const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
330       const __m128i vi9x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i9 + 8));
331       const __m128i vxi9x89ABCDEF = _mm_cvtepi8_epi16(vi9x89ABCDEF);
332       const __m128i vk9x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 152 * sizeof(int8_t)));
333       const __m128i vxk9x89ABCDEF = _mm_cvtepi8_epi16(vk9x89ABCDEF);
334       i9 += 16;
335 
336 
337       vprod01234567 = _mm_macc_epi16(vxi9x01234567, vxk9x01234567, vprod01234567);
338       vprod89ABCDEF = _mm_macc_epi16(vxi9x89ABCDEF, vxk9x89ABCDEF, vprod89ABCDEF);
339 
340       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
341       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
342       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
343       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
344 
345       const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
346       const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
347       const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(int8_t)));
348       const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
349       const __m128i vi10x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i10 + 8));
350       const __m128i vxi10x89ABCDEF = _mm_cvtepi8_epi16(vi10x89ABCDEF);
351       const __m128i vk10x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 168 * sizeof(int8_t)));
352       const __m128i vxk10x89ABCDEF = _mm_cvtepi8_epi16(vk10x89ABCDEF);
353       i10 += 16;
354 
355 
356       vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
357       vprod89ABCDEF = _mm_mullo_epi16(vxi10x89ABCDEF, vxk10x89ABCDEF);
358 
359 
360       const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
361       const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
362       const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(int8_t)));
363       const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
364       const __m128i vi11x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i11 + 8));
365       const __m128i vxi11x89ABCDEF = _mm_cvtepi8_epi16(vi11x89ABCDEF);
366       const __m128i vk11x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 184 * sizeof(int8_t)));
367       const __m128i vxk11x89ABCDEF = _mm_cvtepi8_epi16(vk11x89ABCDEF);
368       i11 += 16;
369 
370 
371       vprod01234567 = _mm_macc_epi16(vxi11x01234567, vxk11x01234567, vprod01234567);
372       vprod89ABCDEF = _mm_macc_epi16(vxi11x89ABCDEF, vxk11x89ABCDEF, vprod89ABCDEF);
373 
374       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
375       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
376       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
377       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
378 
379       const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
380       const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
381       const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(int8_t)));
382       const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
383       const __m128i vi12x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i12 + 8));
384       const __m128i vxi12x89ABCDEF = _mm_cvtepi8_epi16(vi12x89ABCDEF);
385       const __m128i vk12x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 200 * sizeof(int8_t)));
386       const __m128i vxk12x89ABCDEF = _mm_cvtepi8_epi16(vk12x89ABCDEF);
387       i12 += 16;
388 
389 
390       vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
391       vprod89ABCDEF = _mm_mullo_epi16(vxi12x89ABCDEF, vxk12x89ABCDEF);
392 
393 
394       const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
395       const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
396       const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(int8_t)));
397       const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
398       const __m128i vi13x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i13 + 8));
399       const __m128i vxi13x89ABCDEF = _mm_cvtepi8_epi16(vi13x89ABCDEF);
400       const __m128i vk13x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 216 * sizeof(int8_t)));
401       const __m128i vxk13x89ABCDEF = _mm_cvtepi8_epi16(vk13x89ABCDEF);
402       i13 += 16;
403 
404 
405       vprod01234567 = _mm_macc_epi16(vxi13x01234567, vxk13x01234567, vprod01234567);
406       vprod89ABCDEF = _mm_macc_epi16(vxi13x89ABCDEF, vxk13x89ABCDEF, vprod89ABCDEF);
407 
408       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
409       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
410       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
411       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
412 
413       const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
414       const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
415       const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(int8_t)));
416       const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
417       const __m128i vi14x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i14 + 8));
418       const __m128i vxi14x89ABCDEF = _mm_cvtepi8_epi16(vi14x89ABCDEF);
419       const __m128i vk14x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 232 * sizeof(int8_t)));
420       const __m128i vxk14x89ABCDEF = _mm_cvtepi8_epi16(vk14x89ABCDEF);
421       i14 += 16;
422 
423 
424       vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
425       vprod89ABCDEF = _mm_mullo_epi16(vxi14x89ABCDEF, vxk14x89ABCDEF);
426 
427 
428       const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
429       const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
430       const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(int8_t)));
431       const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
432       const __m128i vi15x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i15 + 8));
433       const __m128i vxi15x89ABCDEF = _mm_cvtepi8_epi16(vi15x89ABCDEF);
434       const __m128i vk15x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 248 * sizeof(int8_t)));
435       const __m128i vxk15x89ABCDEF = _mm_cvtepi8_epi16(vk15x89ABCDEF);
436       i15 += 16;
437 
438 
439       vprod01234567 = _mm_macc_epi16(vxi15x01234567, vxk15x01234567, vprod01234567);
440       vprod89ABCDEF = _mm_macc_epi16(vxi15x89ABCDEF, vxk15x89ABCDEF, vprod89ABCDEF);
441 
442       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
443       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
444       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
445       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
446 
447       const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
448       const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
449       const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(int8_t)));
450       const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
451       const __m128i vi16x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i16 + 8));
452       const __m128i vxi16x89ABCDEF = _mm_cvtepi8_epi16(vi16x89ABCDEF);
453       const __m128i vk16x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 264 * sizeof(int8_t)));
454       const __m128i vxk16x89ABCDEF = _mm_cvtepi8_epi16(vk16x89ABCDEF);
455       i16 += 16;
456 
457 
458       vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
459       vprod89ABCDEF = _mm_mullo_epi16(vxi16x89ABCDEF, vxk16x89ABCDEF);
460 
461 
462       const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
463       const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
464       const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(int8_t)));
465       const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
466       const __m128i vi17x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i17 + 8));
467       const __m128i vxi17x89ABCDEF = _mm_cvtepi8_epi16(vi17x89ABCDEF);
468       const __m128i vk17x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 280 * sizeof(int8_t)));
469       const __m128i vxk17x89ABCDEF = _mm_cvtepi8_epi16(vk17x89ABCDEF);
470       i17 += 16;
471 
472 
473       vprod01234567 = _mm_macc_epi16(vxi17x01234567, vxk17x01234567, vprod01234567);
474       vprod89ABCDEF = _mm_macc_epi16(vxi17x89ABCDEF, vxk17x89ABCDEF, vprod89ABCDEF);
475 
476       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
477       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
478       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
479       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
480 
481       const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
482       const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
483       const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(int8_t)));
484       const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
485       const __m128i vi18x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i18 + 8));
486       const __m128i vxi18x89ABCDEF = _mm_cvtepi8_epi16(vi18x89ABCDEF);
487       const __m128i vk18x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 296 * sizeof(int8_t)));
488       const __m128i vxk18x89ABCDEF = _mm_cvtepi8_epi16(vk18x89ABCDEF);
489       i18 += 16;
490 
491 
492       vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
493       vprod89ABCDEF = _mm_mullo_epi16(vxi18x89ABCDEF, vxk18x89ABCDEF);
494 
495 
496       const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
497       const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
498       const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(int8_t)));
499       const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
500       const __m128i vi19x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i19 + 8));
501       const __m128i vxi19x89ABCDEF = _mm_cvtepi8_epi16(vi19x89ABCDEF);
502       const __m128i vk19x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 312 * sizeof(int8_t)));
503       const __m128i vxk19x89ABCDEF = _mm_cvtepi8_epi16(vk19x89ABCDEF);
504       i19 += 16;
505 
506 
507       vprod01234567 = _mm_macc_epi16(vxi19x01234567, vxk19x01234567, vprod01234567);
508       vprod89ABCDEF = _mm_macc_epi16(vxi19x89ABCDEF, vxk19x89ABCDEF, vprod89ABCDEF);
509 
510       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
511       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
512       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
513       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
514 
515       const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
516       const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
517       const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(int8_t)));
518       const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
519       const __m128i vi20x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i20 + 8));
520       const __m128i vxi20x89ABCDEF = _mm_cvtepi8_epi16(vi20x89ABCDEF);
521       const __m128i vk20x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 328 * sizeof(int8_t)));
522       const __m128i vxk20x89ABCDEF = _mm_cvtepi8_epi16(vk20x89ABCDEF);
523       i20 += 16;
524 
525 
526       vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
527       vprod89ABCDEF = _mm_mullo_epi16(vxi20x89ABCDEF, vxk20x89ABCDEF);
528 
529 
530       const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
531       const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
532       const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(int8_t)));
533       const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
534       const __m128i vi21x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i21 + 8));
535       const __m128i vxi21x89ABCDEF = _mm_cvtepi8_epi16(vi21x89ABCDEF);
536       const __m128i vk21x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 344 * sizeof(int8_t)));
537       const __m128i vxk21x89ABCDEF = _mm_cvtepi8_epi16(vk21x89ABCDEF);
538       i21 += 16;
539 
540 
541       vprod01234567 = _mm_macc_epi16(vxi21x01234567, vxk21x01234567, vprod01234567);
542       vprod89ABCDEF = _mm_macc_epi16(vxi21x89ABCDEF, vxk21x89ABCDEF, vprod89ABCDEF);
543 
544       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
545       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
546       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
547       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
548 
549       const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
550       const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
551       const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(int8_t)));
552       const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
553       const __m128i vi22x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i22 + 8));
554       const __m128i vxi22x89ABCDEF = _mm_cvtepi8_epi16(vi22x89ABCDEF);
555       const __m128i vk22x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 360 * sizeof(int8_t)));
556       const __m128i vxk22x89ABCDEF = _mm_cvtepi8_epi16(vk22x89ABCDEF);
557       i22 += 16;
558 
559 
560       vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
561       vprod89ABCDEF = _mm_mullo_epi16(vxi22x89ABCDEF, vxk22x89ABCDEF);
562 
563 
564       const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
565       const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
566       const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(int8_t)));
567       const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
568       const __m128i vi23x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i23 + 8));
569       const __m128i vxi23x89ABCDEF = _mm_cvtepi8_epi16(vi23x89ABCDEF);
570       const __m128i vk23x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 376 * sizeof(int8_t)));
571       const __m128i vxk23x89ABCDEF = _mm_cvtepi8_epi16(vk23x89ABCDEF);
572       i23 += 16;
573 
574 
575       vprod01234567 = _mm_macc_epi16(vxi23x01234567, vxk23x01234567, vprod01234567);
576       vprod89ABCDEF = _mm_macc_epi16(vxi23x89ABCDEF, vxk23x89ABCDEF, vprod89ABCDEF);
577 
578       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
579       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
580       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
581       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
582 
583       const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
584       const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
585       const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(int8_t)));
586       const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
587       const __m128i vi24x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i24 + 8));
588       const __m128i vxi24x89ABCDEF = _mm_cvtepi8_epi16(vi24x89ABCDEF);
589       const __m128i vk24x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 392 * sizeof(int8_t)));
590       const __m128i vxk24x89ABCDEF = _mm_cvtepi8_epi16(vk24x89ABCDEF);
591       i24 += 16;
592 
593 
594       vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
595       vprod89ABCDEF = _mm_mullo_epi16(vxi24x89ABCDEF, vxk24x89ABCDEF);
596 
597       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
598       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
599       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
600       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
601 
602       w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t));
603 
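      /*
       * fp32 requantization: convert the int32 accumulators to float, apply
       * the per-channel scales stored after the kernel taps, clamp against
       * the upper bound, convert back to int32 (_mm_cvtps_epi32 rounds to
       * nearest under the default MXCSR mode), add the output zero point with
       * a saturating 16-bit add, pack to int8, and clamp against the lower
       * bound.
       */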
604       __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
605       __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
606       __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
607       __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
608 
609       const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
610       const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
611       const __m128 vscale89AB = _mm_loadu_ps((const float*) w + 8);
612       const __m128 vscaleCDEF = _mm_loadu_ps((const float*) w + 12);
613       w = (const void*) ((const float*) w + 16);
614       vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
615       vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
616       vscaled89AB = _mm_mul_ps(vscaled89AB, vscale89AB);
617       vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscaleCDEF);
618 
619       const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
620       vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
621       vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
622       vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
623       vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
624 
625       vacc0123 = _mm_cvtps_epi32(vscaled0123);
626       vacc4567 = _mm_cvtps_epi32(vscaled4567);
627       vacc89AB = _mm_cvtps_epi32(vscaled89AB);
628       vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
629 
630       const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
631       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
632       __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
633 
634 
635       __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
636 
637       const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
638       vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
639 
640       _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
641       output += 16;
642     }
643     if XNN_UNLIKELY(c != 0) {
644       const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
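      /*
       * Remainder path for the final 1-15 channels, handled 8 at a time:
       * `k` walks the int8 taps of this 16-channel block (still laid out
       * 16 bytes per tap row), while the per-channel scales are read at a
       * fixed offset past the 400 tap bytes; `w` itself only advances by
       * 8 int32 bias slots per iteration.
       */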
645       do {
646         __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
647         __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
648 
649 
650         const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
651         const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
652         const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
653         const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
654         i0 += 8;
655 
656 
657         __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
658 
659 
660         const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
661         const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
662         const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
663         const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
664         i1 += 8;
665 
666 
667         vprod01234567 = _mm_macc_epi16(vxi1x01234567, vxk1x01234567, vprod01234567);
668 
669         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
670         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
671 
672         const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
673         const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
674         const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
675         const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
676         i2 += 8;
677 
678 
679         vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
680 
681 
682         const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
683         const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
684         const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
685         const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
686         i3 += 8;
687 
688 
689         vprod01234567 = _mm_macc_epi16(vxi3x01234567, vxk3x01234567, vprod01234567);
690 
691         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
692         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
693 
694         const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
695         const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
696         const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
697         const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
698         i4 += 8;
699 
700 
701         vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
702 
703 
704         const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
705         const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
706         const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
707         const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
708         i5 += 8;
709 
710 
711         vprod01234567 = _mm_macc_epi16(vxi5x01234567, vxk5x01234567, vprod01234567);
712 
713         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
714         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
715 
716         const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
717         const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
718         const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
719         const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
720         i6 += 8;
721 
722 
723         vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
724 
725 
726         const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
727         const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
728         const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
729         const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
730         i7 += 8;
731 
732 
733         vprod01234567 = _mm_macc_epi16(vxi7x01234567, vxk7x01234567, vprod01234567);
734 
735         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
736         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
737 
738         const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
739         const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
740         const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
741         const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
742         i8 += 8;
743 
744 
745         vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
746 
747 
748         const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
749         const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
750         const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
751         const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
752         i9 += 8;
753 
754 
755         vprod01234567 = _mm_macc_epi16(vxi9x01234567, vxk9x01234567, vprod01234567);
756 
757         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
758         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
759 
760         const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
761         const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
762         const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) (k + 160));
763         const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
764         i10 += 8;
765 
766 
767         vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
768 
769 
770         const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
771         const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
772         const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) (k + 176));
773         const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
774         i11 += 8;
775 
776 
777         vprod01234567 = _mm_macc_epi16(vxi11x01234567, vxk11x01234567, vprod01234567);
778 
779         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
780         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
781 
782         const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
783         const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
784         const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
785         const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
786         i12 += 8;
787 
788 
789         vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
790 
791 
792         const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
793         const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
794         const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) (k + 208));
795         const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
796         i13 += 8;
797 
798 
799         vprod01234567 = _mm_macc_epi16(vxi13x01234567, vxk13x01234567, vprod01234567);
800 
801         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
802         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
803 
804         const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
805         const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
806         const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) (k + 224));
807         const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
808         i14 += 8;
809 
810 
811         vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
812 
813 
814         const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
815         const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
816         const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) (k + 240));
817         const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
818         i15 += 8;
819 
820 
821         vprod01234567 = _mm_macc_epi16(vxi15x01234567, vxk15x01234567, vprod01234567);
822 
823         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
824         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
825 
826         const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
827         const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
828         const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) (k + 256));
829         const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
830         i16 += 8;
831 
832 
833         vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
834 
835 
836         const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
837         const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
838         const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) (k + 272));
839         const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
840         i17 += 8;
841 
842 
843         vprod01234567 = _mm_macc_epi16(vxi17x01234567, vxk17x01234567, vprod01234567);
844 
845         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
846         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
847 
848         const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
849         const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
850         const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) (k + 288));
851         const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
852         i18 += 8;
853 
854 
855         vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
856 
857 
858         const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
859         const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
860         const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) (k + 304));
861         const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
862         i19 += 8;
863 
864 
865         vprod01234567 = _mm_macc_epi16(vxi19x01234567, vxk19x01234567, vprod01234567);
866 
867         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
868         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
869 
870         const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
871         const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
872         const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) (k + 320));
873         const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
874         i20 += 8;
875 
876 
877         vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
878 
879 
880         const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
881         const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
882         const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) (k + 336));
883         const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
884         i21 += 8;
885 
886 
887         vprod01234567 = _mm_macc_epi16(vxi21x01234567, vxk21x01234567, vprod01234567);
888 
889         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
890         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
891 
892         const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
893         const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
894         const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) (k + 352));
895         const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
896         i22 += 8;
897 
898 
899         vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
900 
901 
902         const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
903         const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
904         const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) (k + 368));
905         const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
906         i23 += 8;
907 
908 
909         vprod01234567 = _mm_macc_epi16(vxi23x01234567, vxk23x01234567, vprod01234567);
910 
911         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
912         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
913 
914         const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
915         const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
916         const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) (k + 384));
917         const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
918         i24 += 8;
919 
920 
921         vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
922 
923         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
924         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
925 
926         k += 8;
927 
928         __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
929         __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
930 
931         const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t)));
932         const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(int8_t) + 4 * sizeof(float)));
933         vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
934         vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);
935 
936         const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
937         vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
938         vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
939 
940         vacc0123 = _mm_cvtps_epi32(vscaled0123);
941         vacc4567 = _mm_cvtps_epi32(vscaled4567);
942 
943         w = (const void*) ((const int32_t*) w + 8);
944 
945         const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
946         __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
947 
948 
949         __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
950 
951         vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
952 
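        /*
         * Store 8 outputs when a full group remains; otherwise write the
         * 4/2/1-byte tail, shifting consumed lanes out of the vector.
         */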
953         if XNN_LIKELY(c >= 8) {
954           _mm_storel_epi64((__m128i*) output, vout0123456701234567);
955           output += 8;
956           c -= 8;
957         } else {
958           if (c & 4) {
959             unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
960             vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
961             output += 4;
962           }
963           if (c & 2) {
964             unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
965             vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
966             output += 2;
967           }
968           if (c & 1) {
969             *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
970             output += 1;
971           }
972           c = 0;
973         }
974       } while (c != 0);
975     }
976 
977     output = (int8_t*) ((uintptr_t) output + output_increment);
978   } while (--output_width != 0);
979 }
980