1 // Auto-generated file. Do not edit!
2 //   Template: src/qs8-dwconv/unipass-sse-mul16.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2020 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 
12 #include <smmintrin.h>
13 
14 #include <xnnpack/dwconv.h>
15 #include <xnnpack/unaligned.h>
16 
17 
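// Depthwise convolution microkernel for signed 8-bit (QS8) data: 25-tap
// unipass kernel ("x25"), 24 channels per main-loop iteration ("up24"),
// fp32 requantization, SSE4.1 code path built on 16-bit multiplies ("mul16").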
18 void xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__sse41_mul16(
19     size_t channels,
20     size_t output_width,
21     const int8_t** input,
22     const void* weights,
23     int8_t* output,
24     size_t input_stride,
25     size_t output_increment,
26     size_t input_offset,
27     const int8_t* zero,
28     const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
29 {
30   assert(channels != 0);
31   assert(output_width != 0);
32 
33   do {
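    // One output pixel per iteration of the outer loop: fetch the 25 input
    // row pointers for this pixel and add input_offset to each, except when
    // a pointer refers to the shared zero (padding) buffer.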
34     const int8_t* i0 = input[0];
35     assert(i0 != NULL);
36     if XNN_UNPREDICTABLE(i0 != zero) {
37       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
38     }
39     const int8_t* i1 = input[1];
40     assert(i1 != NULL);
41     if XNN_UNPREDICTABLE(i1 != zero) {
42       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
43     }
44     const int8_t* i2 = input[2];
45     assert(i2 != NULL);
46     if XNN_UNPREDICTABLE(i2 != zero) {
47       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
48     }
49     const int8_t* i3 = input[3];
50     assert(i3 != NULL);
51     if XNN_UNPREDICTABLE(i3 != zero) {
52       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
53     }
54     const int8_t* i4 = input[4];
55     assert(i4 != NULL);
56     if XNN_UNPREDICTABLE(i4 != zero) {
57       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
58     }
59     const int8_t* i5 = input[5];
60     assert(i5 != NULL);
61     if XNN_UNPREDICTABLE(i5 != zero) {
62       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
63     }
64     const int8_t* i6 = input[6];
65     assert(i6 != NULL);
66     if XNN_UNPREDICTABLE(i6 != zero) {
67       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
68     }
69     const int8_t* i7 = input[7];
70     assert(i7 != NULL);
71     if XNN_UNPREDICTABLE(i7 != zero) {
72       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
73     }
74     const int8_t* i8 = input[8];
75     assert(i8 != NULL);
76     if XNN_UNPREDICTABLE(i8 != zero) {
77       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
78     }
79     const int8_t* i9 = input[9];
80     assert(i9 != NULL);
81     if XNN_UNPREDICTABLE(i9 != zero) {
82       i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
83     }
84     const int8_t* i10 = input[10];
85     assert(i10 != NULL);
86     if XNN_UNPREDICTABLE(i10 != zero) {
87       i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
88     }
89     const int8_t* i11 = input[11];
90     assert(i11 != NULL);
91     if XNN_UNPREDICTABLE(i11 != zero) {
92       i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
93     }
94     const int8_t* i12 = input[12];
95     assert(i12 != NULL);
96     if XNN_UNPREDICTABLE(i12 != zero) {
97       i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
98     }
99     const int8_t* i13 = input[13];
100     assert(i13 != NULL);
101     if XNN_UNPREDICTABLE(i13 != zero) {
102       i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
103     }
104     const int8_t* i14 = input[14];
105     assert(i14 != NULL);
106     if XNN_UNPREDICTABLE(i14 != zero) {
107       i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
108     }
109     const int8_t* i15 = input[15];
110     assert(i15 != NULL);
111     if XNN_UNPREDICTABLE(i15 != zero) {
112       i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
113     }
114     const int8_t* i16 = input[16];
115     assert(i16 != NULL);
116     if XNN_UNPREDICTABLE(i16 != zero) {
117       i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
118     }
119     const int8_t* i17 = input[17];
120     assert(i17 != NULL);
121     if XNN_UNPREDICTABLE(i17 != zero) {
122       i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
123     }
124     const int8_t* i18 = input[18];
125     assert(i18 != NULL);
126     if XNN_UNPREDICTABLE(i18 != zero) {
127       i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
128     }
129     const int8_t* i19 = input[19];
130     assert(i19 != NULL);
131     if XNN_UNPREDICTABLE(i19 != zero) {
132       i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
133     }
134     const int8_t* i20 = input[20];
135     assert(i20 != NULL);
136     if XNN_UNPREDICTABLE(i20 != zero) {
137       i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
138     }
139     const int8_t* i21 = input[21];
140     assert(i21 != NULL);
141     if XNN_UNPREDICTABLE(i21 != zero) {
142       i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
143     }
144     const int8_t* i22 = input[22];
145     assert(i22 != NULL);
146     if XNN_UNPREDICTABLE(i22 != zero) {
147       i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
148     }
149     const int8_t* i23 = input[23];
150     assert(i23 != NULL);
151     if XNN_UNPREDICTABLE(i23 != zero) {
152       i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
153     }
154     const int8_t* i24 = input[24];
155     assert(i24 != NULL);
156     if XNN_UNPREDICTABLE(i24 != zero) {
157       i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
158     }
159     input = (const int8_t**) ((uintptr_t) input + input_stride);
160 
161     size_t c = channels;
162     const void* w = weights;
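    // Main loop: 24 channels per iteration. The packed weights begin with
    // 24 int32 bias values, followed by 25 groups of 24 int8 kernel taps.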
163     for (; c >= 24; c -= 24) {
164       __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
165       __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
166       __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
167       __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
168       __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 16));
169       __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 20));
170 
171 
172       const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
173       const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
174       const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
175       const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
176       const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
177       const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
178       const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
179       const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
180       const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
181       const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(vi0xGHIJKLMN);
182       const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
183       const __m128i vxk0xGHIJKLMN = _mm_cvtepi8_epi16(vk0xGHIJKLMN);
184       i0 += 24;
185 
186 
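      // The operands are sign-extended int8 values, so each 16-bit product of
      // _mm_mullo_epi16 is exact; it is widened to 32 bits (low half via
      // _mm_cvtepi16_epi32, high half via unpackhi + arithmetic shift) and
      // added to the accumulators. The same pattern repeats for all 25 taps.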
187       __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
188       __m128i vprod89ABCDEF = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
189       __m128i vprodGHIJKLMN = _mm_mullo_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
190 
191       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
192       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
193       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
194       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
195       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
196       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
197 
198       const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
199       const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
200       const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
201       const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
202       const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
203       const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
204       const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
205       const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
206       const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
207       const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(vi1xGHIJKLMN);
208       const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
209       const __m128i vxk1xGHIJKLMN = _mm_cvtepi8_epi16(vk1xGHIJKLMN);
210       i1 += 24;
211 
212 
213       vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
214       vprod89ABCDEF = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
215       vprodGHIJKLMN = _mm_mullo_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
216 
217       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
218       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
219       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
220       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
221       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
222       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
223 
224       const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
225       const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
226       const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
227       const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
228       const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
229       const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
230       const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
231       const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
232       const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
233       const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(vi2xGHIJKLMN);
234       const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
235       const __m128i vxk2xGHIJKLMN = _mm_cvtepi8_epi16(vk2xGHIJKLMN);
236       i2 += 24;
237 
238 
239       vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
240       vprod89ABCDEF = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
241       vprodGHIJKLMN = _mm_mullo_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
242 
243       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
244       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
245       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
246       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
247       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
248       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
249 
250       const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
251       const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
252       const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
253       const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
254       const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
255       const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
256       const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
257       const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
258       const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
259       const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(vi3xGHIJKLMN);
260       const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
261       const __m128i vxk3xGHIJKLMN = _mm_cvtepi8_epi16(vk3xGHIJKLMN);
262       i3 += 24;
263 
264 
265       vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
266       vprod89ABCDEF = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
267       vprodGHIJKLMN = _mm_mullo_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
268 
269       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
270       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
271       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
272       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
273       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
274       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
275 
276       const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
277       const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
278       const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
279       const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
280       const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
281       const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
282       const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
283       const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
284       const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
285       const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(vi4xGHIJKLMN);
286       const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
287       const __m128i vxk4xGHIJKLMN = _mm_cvtepi8_epi16(vk4xGHIJKLMN);
288       i4 += 24;
289 
290 
291       vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
292       vprod89ABCDEF = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
293       vprodGHIJKLMN = _mm_mullo_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
294 
295       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
296       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
297       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
298       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
299       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
300       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
301 
302       const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
303       const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
304       const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
305       const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
306       const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
307       const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
308       const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
309       const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
310       const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
311       const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(vi5xGHIJKLMN);
312       const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
313       const __m128i vxk5xGHIJKLMN = _mm_cvtepi8_epi16(vk5xGHIJKLMN);
314       i5 += 24;
315 
316 
317       vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
318       vprod89ABCDEF = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
319       vprodGHIJKLMN = _mm_mullo_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
320 
321       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
322       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
323       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
324       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
325       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
326       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
327 
328       const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
329       const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
330       const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
331       const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
332       const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
333       const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
334       const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
335       const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
336       const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
337       const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(vi6xGHIJKLMN);
338       const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
339       const __m128i vxk6xGHIJKLMN = _mm_cvtepi8_epi16(vk6xGHIJKLMN);
340       i6 += 24;
341 
342 
343       vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
344       vprod89ABCDEF = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
345       vprodGHIJKLMN = _mm_mullo_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
346 
347       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
348       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
349       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
350       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
351       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
352       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
353 
354       const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
355       const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
356       const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
357       const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
358       const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
359       const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
360       const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
361       const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
362       const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
363       const __m128i vxi7xGHIJKLMN = _mm_cvtepi8_epi16(vi7xGHIJKLMN);
364       const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
365       const __m128i vxk7xGHIJKLMN = _mm_cvtepi8_epi16(vk7xGHIJKLMN);
366       i7 += 24;
367 
368 
369       vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
370       vprod89ABCDEF = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
371       vprodGHIJKLMN = _mm_mullo_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
372 
373       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
374       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
375       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
376       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
377       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
378       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
379 
380       const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
381       const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
382       const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
383       const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
384       const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
385       const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
386       const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
387       const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
388       const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
389       const __m128i vxi8xGHIJKLMN = _mm_cvtepi8_epi16(vi8xGHIJKLMN);
390       const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
391       const __m128i vxk8xGHIJKLMN = _mm_cvtepi8_epi16(vk8xGHIJKLMN);
392       i8 += 24;
393 
394 
395       vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
396       vprod89ABCDEF = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
397       vprodGHIJKLMN = _mm_mullo_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
398 
399       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
400       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
401       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
402       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
403       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
404       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
405 
406       const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
407       const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
408       const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t)));
409       const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
410       const __m128i vi9x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i9 + 8));
411       const __m128i vxi9x89ABCDEF = _mm_cvtepi8_epi16(vi9x89ABCDEF);
412       const __m128i vk9x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 224 * sizeof(int8_t)));
413       const __m128i vxk9x89ABCDEF = _mm_cvtepi8_epi16(vk9x89ABCDEF);
414       const __m128i vi9xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i9 + 16));
415       const __m128i vxi9xGHIJKLMN = _mm_cvtepi8_epi16(vi9xGHIJKLMN);
416       const __m128i vk9xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 232 * sizeof(int8_t)));
417       const __m128i vxk9xGHIJKLMN = _mm_cvtepi8_epi16(vk9xGHIJKLMN);
418       i9 += 24;
419 
420 
421       vprod01234567 = _mm_mullo_epi16(vxi9x01234567, vxk9x01234567);
422       vprod89ABCDEF = _mm_mullo_epi16(vxi9x89ABCDEF, vxk9x89ABCDEF);
423       vprodGHIJKLMN = _mm_mullo_epi16(vxi9xGHIJKLMN, vxk9xGHIJKLMN);
424 
425       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
426       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
427       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
428       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
429       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
430       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
431 
432       const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
433       const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
434       const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 240 * sizeof(int8_t)));
435       const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
436       const __m128i vi10x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i10 + 8));
437       const __m128i vxi10x89ABCDEF = _mm_cvtepi8_epi16(vi10x89ABCDEF);
438       const __m128i vk10x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 248 * sizeof(int8_t)));
439       const __m128i vxk10x89ABCDEF = _mm_cvtepi8_epi16(vk10x89ABCDEF);
440       const __m128i vi10xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i10 + 16));
441       const __m128i vxi10xGHIJKLMN = _mm_cvtepi8_epi16(vi10xGHIJKLMN);
442       const __m128i vk10xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 256 * sizeof(int8_t)));
443       const __m128i vxk10xGHIJKLMN = _mm_cvtepi8_epi16(vk10xGHIJKLMN);
444       i10 += 24;
445 
446 
447       vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
448       vprod89ABCDEF = _mm_mullo_epi16(vxi10x89ABCDEF, vxk10x89ABCDEF);
449       vprodGHIJKLMN = _mm_mullo_epi16(vxi10xGHIJKLMN, vxk10xGHIJKLMN);
450 
451       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
452       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
453       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
454       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
455       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
456       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
457 
458       const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
459       const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
460       const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 264 * sizeof(int8_t)));
461       const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
462       const __m128i vi11x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i11 + 8));
463       const __m128i vxi11x89ABCDEF = _mm_cvtepi8_epi16(vi11x89ABCDEF);
464       const __m128i vk11x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 272 * sizeof(int8_t)));
465       const __m128i vxk11x89ABCDEF = _mm_cvtepi8_epi16(vk11x89ABCDEF);
466       const __m128i vi11xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i11 + 16));
467       const __m128i vxi11xGHIJKLMN = _mm_cvtepi8_epi16(vi11xGHIJKLMN);
468       const __m128i vk11xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 280 * sizeof(int8_t)));
469       const __m128i vxk11xGHIJKLMN = _mm_cvtepi8_epi16(vk11xGHIJKLMN);
470       i11 += 24;
471 
472 
473       vprod01234567 = _mm_mullo_epi16(vxi11x01234567, vxk11x01234567);
474       vprod89ABCDEF = _mm_mullo_epi16(vxi11x89ABCDEF, vxk11x89ABCDEF);
475       vprodGHIJKLMN = _mm_mullo_epi16(vxi11xGHIJKLMN, vxk11xGHIJKLMN);
476 
477       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
478       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
479       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
480       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
481       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
482       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
483 
484       const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
485       const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
486       const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 288 * sizeof(int8_t)));
487       const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
488       const __m128i vi12x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i12 + 8));
489       const __m128i vxi12x89ABCDEF = _mm_cvtepi8_epi16(vi12x89ABCDEF);
490       const __m128i vk12x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 296 * sizeof(int8_t)));
491       const __m128i vxk12x89ABCDEF = _mm_cvtepi8_epi16(vk12x89ABCDEF);
492       const __m128i vi12xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i12 + 16));
493       const __m128i vxi12xGHIJKLMN = _mm_cvtepi8_epi16(vi12xGHIJKLMN);
494       const __m128i vk12xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 304 * sizeof(int8_t)));
495       const __m128i vxk12xGHIJKLMN = _mm_cvtepi8_epi16(vk12xGHIJKLMN);
496       i12 += 24;
497 
498 
499       vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
500       vprod89ABCDEF = _mm_mullo_epi16(vxi12x89ABCDEF, vxk12x89ABCDEF);
501       vprodGHIJKLMN = _mm_mullo_epi16(vxi12xGHIJKLMN, vxk12xGHIJKLMN);
502 
503       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
504       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
505       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
506       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
507       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
508       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
509 
510       const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
511       const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
512       const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 312 * sizeof(int8_t)));
513       const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
514       const __m128i vi13x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i13 + 8));
515       const __m128i vxi13x89ABCDEF = _mm_cvtepi8_epi16(vi13x89ABCDEF);
516       const __m128i vk13x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 320 * sizeof(int8_t)));
517       const __m128i vxk13x89ABCDEF = _mm_cvtepi8_epi16(vk13x89ABCDEF);
518       const __m128i vi13xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i13 + 16));
519       const __m128i vxi13xGHIJKLMN = _mm_cvtepi8_epi16(vi13xGHIJKLMN);
520       const __m128i vk13xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 328 * sizeof(int8_t)));
521       const __m128i vxk13xGHIJKLMN = _mm_cvtepi8_epi16(vk13xGHIJKLMN);
522       i13 += 24;
523 
524 
525       vprod01234567 = _mm_mullo_epi16(vxi13x01234567, vxk13x01234567);
526       vprod89ABCDEF = _mm_mullo_epi16(vxi13x89ABCDEF, vxk13x89ABCDEF);
527       vprodGHIJKLMN = _mm_mullo_epi16(vxi13xGHIJKLMN, vxk13xGHIJKLMN);
528 
529       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
530       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
531       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
532       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
533       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
534       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
535 
536       const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
537       const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
538       const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 336 * sizeof(int8_t)));
539       const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
540       const __m128i vi14x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i14 + 8));
541       const __m128i vxi14x89ABCDEF = _mm_cvtepi8_epi16(vi14x89ABCDEF);
542       const __m128i vk14x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 344 * sizeof(int8_t)));
543       const __m128i vxk14x89ABCDEF = _mm_cvtepi8_epi16(vk14x89ABCDEF);
544       const __m128i vi14xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i14 + 16));
545       const __m128i vxi14xGHIJKLMN = _mm_cvtepi8_epi16(vi14xGHIJKLMN);
546       const __m128i vk14xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 352 * sizeof(int8_t)));
547       const __m128i vxk14xGHIJKLMN = _mm_cvtepi8_epi16(vk14xGHIJKLMN);
548       i14 += 24;
549 
550 
551       vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
552       vprod89ABCDEF = _mm_mullo_epi16(vxi14x89ABCDEF, vxk14x89ABCDEF);
553       vprodGHIJKLMN = _mm_mullo_epi16(vxi14xGHIJKLMN, vxk14xGHIJKLMN);
554 
555       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
556       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
557       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
558       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
559       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
560       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
561 
562       const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
563       const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
564       const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 360 * sizeof(int8_t)));
565       const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
566       const __m128i vi15x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i15 + 8));
567       const __m128i vxi15x89ABCDEF = _mm_cvtepi8_epi16(vi15x89ABCDEF);
568       const __m128i vk15x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 368 * sizeof(int8_t)));
569       const __m128i vxk15x89ABCDEF = _mm_cvtepi8_epi16(vk15x89ABCDEF);
570       const __m128i vi15xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i15 + 16));
571       const __m128i vxi15xGHIJKLMN = _mm_cvtepi8_epi16(vi15xGHIJKLMN);
572       const __m128i vk15xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 376 * sizeof(int8_t)));
573       const __m128i vxk15xGHIJKLMN = _mm_cvtepi8_epi16(vk15xGHIJKLMN);
574       i15 += 24;
575 
576 
577       vprod01234567 = _mm_mullo_epi16(vxi15x01234567, vxk15x01234567);
578       vprod89ABCDEF = _mm_mullo_epi16(vxi15x89ABCDEF, vxk15x89ABCDEF);
579       vprodGHIJKLMN = _mm_mullo_epi16(vxi15xGHIJKLMN, vxk15xGHIJKLMN);
580 
581       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
582       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
583       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
584       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
585       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
586       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
587 
588       const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
589       const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
590       const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 384 * sizeof(int8_t)));
591       const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
592       const __m128i vi16x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i16 + 8));
593       const __m128i vxi16x89ABCDEF = _mm_cvtepi8_epi16(vi16x89ABCDEF);
594       const __m128i vk16x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 392 * sizeof(int8_t)));
595       const __m128i vxk16x89ABCDEF = _mm_cvtepi8_epi16(vk16x89ABCDEF);
596       const __m128i vi16xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i16 + 16));
597       const __m128i vxi16xGHIJKLMN = _mm_cvtepi8_epi16(vi16xGHIJKLMN);
598       const __m128i vk16xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 400 * sizeof(int8_t)));
599       const __m128i vxk16xGHIJKLMN = _mm_cvtepi8_epi16(vk16xGHIJKLMN);
600       i16 += 24;
601 
602 
603       vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
604       vprod89ABCDEF = _mm_mullo_epi16(vxi16x89ABCDEF, vxk16x89ABCDEF);
605       vprodGHIJKLMN = _mm_mullo_epi16(vxi16xGHIJKLMN, vxk16xGHIJKLMN);
606 
607       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
608       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
609       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
610       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
611       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
612       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
613 
614       const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
615       const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
616       const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 408 * sizeof(int8_t)));
617       const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
618       const __m128i vi17x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i17 + 8));
619       const __m128i vxi17x89ABCDEF = _mm_cvtepi8_epi16(vi17x89ABCDEF);
620       const __m128i vk17x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 416 * sizeof(int8_t)));
621       const __m128i vxk17x89ABCDEF = _mm_cvtepi8_epi16(vk17x89ABCDEF);
622       const __m128i vi17xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i17 + 16));
623       const __m128i vxi17xGHIJKLMN = _mm_cvtepi8_epi16(vi17xGHIJKLMN);
624       const __m128i vk17xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 424 * sizeof(int8_t)));
625       const __m128i vxk17xGHIJKLMN = _mm_cvtepi8_epi16(vk17xGHIJKLMN);
626       i17 += 24;
627 
628 
629       vprod01234567 = _mm_mullo_epi16(vxi17x01234567, vxk17x01234567);
630       vprod89ABCDEF = _mm_mullo_epi16(vxi17x89ABCDEF, vxk17x89ABCDEF);
631       vprodGHIJKLMN = _mm_mullo_epi16(vxi17xGHIJKLMN, vxk17xGHIJKLMN);
632 
633       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
634       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
635       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
636       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
637       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
638       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
639 
640       const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
641       const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
642       const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 432 * sizeof(int8_t)));
643       const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
644       const __m128i vi18x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i18 + 8));
645       const __m128i vxi18x89ABCDEF = _mm_cvtepi8_epi16(vi18x89ABCDEF);
646       const __m128i vk18x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 440 * sizeof(int8_t)));
647       const __m128i vxk18x89ABCDEF = _mm_cvtepi8_epi16(vk18x89ABCDEF);
648       const __m128i vi18xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i18 + 16));
649       const __m128i vxi18xGHIJKLMN = _mm_cvtepi8_epi16(vi18xGHIJKLMN);
650       const __m128i vk18xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 448 * sizeof(int8_t)));
651       const __m128i vxk18xGHIJKLMN = _mm_cvtepi8_epi16(vk18xGHIJKLMN);
652       i18 += 24;
653 
654 
655       vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
656       vprod89ABCDEF = _mm_mullo_epi16(vxi18x89ABCDEF, vxk18x89ABCDEF);
657       vprodGHIJKLMN = _mm_mullo_epi16(vxi18xGHIJKLMN, vxk18xGHIJKLMN);
658 
659       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
660       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
661       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
662       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
663       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
664       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
665 
666       const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
667       const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
668       const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 456 * sizeof(int8_t)));
669       const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
670       const __m128i vi19x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i19 + 8));
671       const __m128i vxi19x89ABCDEF = _mm_cvtepi8_epi16(vi19x89ABCDEF);
672       const __m128i vk19x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 464 * sizeof(int8_t)));
673       const __m128i vxk19x89ABCDEF = _mm_cvtepi8_epi16(vk19x89ABCDEF);
674       const __m128i vi19xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i19 + 16));
675       const __m128i vxi19xGHIJKLMN = _mm_cvtepi8_epi16(vi19xGHIJKLMN);
676       const __m128i vk19xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 472 * sizeof(int8_t)));
677       const __m128i vxk19xGHIJKLMN = _mm_cvtepi8_epi16(vk19xGHIJKLMN);
678       i19 += 24;
679 
680 
681       vprod01234567 = _mm_mullo_epi16(vxi19x01234567, vxk19x01234567);
682       vprod89ABCDEF = _mm_mullo_epi16(vxi19x89ABCDEF, vxk19x89ABCDEF);
683       vprodGHIJKLMN = _mm_mullo_epi16(vxi19xGHIJKLMN, vxk19xGHIJKLMN);
684 
685       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
686       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
687       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
688       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
689       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
690       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
691 
692       const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
693       const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
694       const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 480 * sizeof(int8_t)));
695       const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
696       const __m128i vi20x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i20 + 8));
697       const __m128i vxi20x89ABCDEF = _mm_cvtepi8_epi16(vi20x89ABCDEF);
698       const __m128i vk20x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 488 * sizeof(int8_t)));
699       const __m128i vxk20x89ABCDEF = _mm_cvtepi8_epi16(vk20x89ABCDEF);
700       const __m128i vi20xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i20 + 16));
701       const __m128i vxi20xGHIJKLMN = _mm_cvtepi8_epi16(vi20xGHIJKLMN);
702       const __m128i vk20xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 496 * sizeof(int8_t)));
703       const __m128i vxk20xGHIJKLMN = _mm_cvtepi8_epi16(vk20xGHIJKLMN);
704       i20 += 24;
705 
706 
707       vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
708       vprod89ABCDEF = _mm_mullo_epi16(vxi20x89ABCDEF, vxk20x89ABCDEF);
709       vprodGHIJKLMN = _mm_mullo_epi16(vxi20xGHIJKLMN, vxk20xGHIJKLMN);
710 
711       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
712       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
713       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
714       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
715       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
716       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
717 
718       const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
719       const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
720       const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 504 * sizeof(int8_t)));
721       const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
722       const __m128i vi21x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i21 + 8));
723       const __m128i vxi21x89ABCDEF = _mm_cvtepi8_epi16(vi21x89ABCDEF);
724       const __m128i vk21x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 512 * sizeof(int8_t)));
725       const __m128i vxk21x89ABCDEF = _mm_cvtepi8_epi16(vk21x89ABCDEF);
726       const __m128i vi21xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i21 + 16));
727       const __m128i vxi21xGHIJKLMN = _mm_cvtepi8_epi16(vi21xGHIJKLMN);
728       const __m128i vk21xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 520 * sizeof(int8_t)));
729       const __m128i vxk21xGHIJKLMN = _mm_cvtepi8_epi16(vk21xGHIJKLMN);
730       i21 += 24;
731 
732 
733       vprod01234567 = _mm_mullo_epi16(vxi21x01234567, vxk21x01234567);
734       vprod89ABCDEF = _mm_mullo_epi16(vxi21x89ABCDEF, vxk21x89ABCDEF);
735       vprodGHIJKLMN = _mm_mullo_epi16(vxi21xGHIJKLMN, vxk21xGHIJKLMN);
736 
737       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
738       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
739       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
740       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
741       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
742       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
743 
744       const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
745       const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
746       const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 528 * sizeof(int8_t)));
747       const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
748       const __m128i vi22x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i22 + 8));
749       const __m128i vxi22x89ABCDEF = _mm_cvtepi8_epi16(vi22x89ABCDEF);
750       const __m128i vk22x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 536 * sizeof(int8_t)));
751       const __m128i vxk22x89ABCDEF = _mm_cvtepi8_epi16(vk22x89ABCDEF);
752       const __m128i vi22xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i22 + 16));
753       const __m128i vxi22xGHIJKLMN = _mm_cvtepi8_epi16(vi22xGHIJKLMN);
754       const __m128i vk22xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 544 * sizeof(int8_t)));
755       const __m128i vxk22xGHIJKLMN = _mm_cvtepi8_epi16(vk22xGHIJKLMN);
756       i22 += 24;
757 
758 
759       vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
760       vprod89ABCDEF = _mm_mullo_epi16(vxi22x89ABCDEF, vxk22x89ABCDEF);
761       vprodGHIJKLMN = _mm_mullo_epi16(vxi22xGHIJKLMN, vxk22xGHIJKLMN);
762 
763       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
764       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
765       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
766       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
767       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
768       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
769 
770       const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
771       const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
772       const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 552 * sizeof(int8_t)));
773       const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
774       const __m128i vi23x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i23 + 8));
775       const __m128i vxi23x89ABCDEF = _mm_cvtepi8_epi16(vi23x89ABCDEF);
776       const __m128i vk23x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 560 * sizeof(int8_t)));
777       const __m128i vxk23x89ABCDEF = _mm_cvtepi8_epi16(vk23x89ABCDEF);
778       const __m128i vi23xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i23 + 16));
779       const __m128i vxi23xGHIJKLMN = _mm_cvtepi8_epi16(vi23xGHIJKLMN);
780       const __m128i vk23xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 568 * sizeof(int8_t)));
781       const __m128i vxk23xGHIJKLMN = _mm_cvtepi8_epi16(vk23xGHIJKLMN);
782       i23 += 24;
783 
784 
785       vprod01234567 = _mm_mullo_epi16(vxi23x01234567, vxk23x01234567);
786       vprod89ABCDEF = _mm_mullo_epi16(vxi23x89ABCDEF, vxk23x89ABCDEF);
787       vprodGHIJKLMN = _mm_mullo_epi16(vxi23xGHIJKLMN, vxk23xGHIJKLMN);
788 
789       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
790       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
791       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
792       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
793       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
794       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
795 
796       const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
797       const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
798       const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 576 * sizeof(int8_t)));
799       const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
800       const __m128i vi24x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i24 + 8));
801       const __m128i vxi24x89ABCDEF = _mm_cvtepi8_epi16(vi24x89ABCDEF);
802       const __m128i vk24x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 584 * sizeof(int8_t)));
803       const __m128i vxk24x89ABCDEF = _mm_cvtepi8_epi16(vk24x89ABCDEF);
804       const __m128i vi24xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i24 + 16));
805       const __m128i vxi24xGHIJKLMN = _mm_cvtepi8_epi16(vi24xGHIJKLMN);
806       const __m128i vk24xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 592 * sizeof(int8_t)));
807       const __m128i vxk24xGHIJKLMN = _mm_cvtepi8_epi16(vk24xGHIJKLMN);
808       i24 += 24;
809 
810 
811       vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
812       vprod89ABCDEF = _mm_mullo_epi16(vxi24x89ABCDEF, vxk24x89ABCDEF);
813       vprodGHIJKLMN = _mm_mullo_epi16(vxi24xGHIJKLMN, vxk24xGHIJKLMN);
814 
815       vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
816       vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
817       vacc89AB = _mm_add_epi32(vacc89AB, _mm_cvtepi16_epi32(vprod89ABCDEF));
818       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_srai_epi32(_mm_unpackhi_epi16(vprod89ABCDEF, vprod89ABCDEF), 16));
819       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_cvtepi16_epi32(vprodGHIJKLMN));
820       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_srai_epi32(_mm_unpackhi_epi16(vprodGHIJKLMN, vprodGHIJKLMN), 16));
821 
822       w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 600 * sizeof(int8_t));
823 
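      // fp32 requantization: convert the int32 accumulators to float, scale,
      // clamp against (output_max - zero_point), convert back to int32
      // (_mm_cvtps_epi32 rounds to nearest-even in the default mode), add the
      // output zero point with int16 saturation, pack to int8, and apply the
      // output_min clamp.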
824       __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
825       __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
826       __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
827       __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
828       __m128 vscaledGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
829       __m128 vscaledKLMN = _mm_cvtepi32_ps(vaccKLMN);
830 
831       const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
832       vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
833       vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
834       vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
835       vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
836       vscaledGHIJ = _mm_mul_ps(vscaledGHIJ, vscale);
837       vscaledKLMN = _mm_mul_ps(vscaledKLMN, vscale);
838 
839       const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
840       vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
841       vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
842       vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
843       vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
844       vscaledGHIJ = _mm_min_ps(vscaledGHIJ, voutput_max_less_zero_point);
845       vscaledKLMN = _mm_min_ps(vscaledKLMN, voutput_max_less_zero_point);
846 
847       vacc0123 = _mm_cvtps_epi32(vscaled0123);
848       vacc4567 = _mm_cvtps_epi32(vscaled4567);
849       vacc89AB = _mm_cvtps_epi32(vscaled89AB);
850       vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
851       vaccGHIJ = _mm_cvtps_epi32(vscaledGHIJ);
852       vaccKLMN = _mm_cvtps_epi32(vscaledKLMN);
853 
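      // Convert back to int8: pack int32 -> int16 with signed saturation, add
      // the output zero point with a saturating 16-bit add, then pack
      // int16 -> int8 (again saturating). Only the lower clamp (output_min)
      // still needs to be applied here; the upper clamp was already enforced
      // in the float domain above.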
854       const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
855       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
856       __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
857       __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
858 
859 
860       __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
861       __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
862 
863       const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
864       vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
865       voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
866 
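      // Store all 24 int8 outputs: 16 bytes with an unaligned 128-bit store,
      // then the remaining 8 bytes with a 64-bit store.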
867       _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
868       _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
869       output += 24;
870     }
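    // Remainder path: the last (channels % 24) channels, processed in groups
    // of up to 8. k points just past the 24 per-channel int32 biases of this
    // tile, and the kernel values for tap n of the current group are read at
    // k + 24*n, i.e. the packed weights keep the 24-channel tile stride.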
871     if XNN_UNLIKELY(c != 0) {
872       const int8_t* k = (const int8_t*) ((const int32_t*) w + 24);
873       do {
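        // Initialize the accumulators for this group of 8 channels with the
        // packed int32 biases that w currently points at.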
874         __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
875         __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
876 
877 
878         const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
879         const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
880         const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
881         const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
882         i0 += 8;
883 
884 
885         __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
886 
887         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
888         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
889 
890         const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
891         const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
892         const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
893         const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
894         i1 += 8;
895 
896 
897         vprod01234567 = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
898 
899         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
900         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
901 
902         const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
903         const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
904         const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
905         const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
906         i2 += 8;
907 
908 
909         vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
910 
911         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
912         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
913 
914         const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
915         const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
916         const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
917         const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
918         i3 += 8;
919 
920 
921         vprod01234567 = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
922 
923         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
924         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
925 
926         const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
927         const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
928         const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
929         const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
930         i4 += 8;
931 
932 
933         vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
934 
935         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
936         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
937 
938         const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
939         const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
940         const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
941         const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
942         i5 += 8;
943 
944 
945         vprod01234567 = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
946 
947         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
948         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
949 
950         const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
951         const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
952         const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
953         const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
954         i6 += 8;
955 
956 
957         vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
958 
959         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
960         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
961 
962         const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
963         const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
964         const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
965         const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
966         i7 += 8;
967 
968 
969         vprod01234567 = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
970 
971         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
972         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
973 
974         const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
975         const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
976         const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
977         const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
978         i8 += 8;
979 
980 
981         vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
982 
983         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
984         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
985 
986         const __m128i vi9x01234567 = _mm_loadl_epi64((const __m128i*) i9);
987         const __m128i vxi9x01234567 = _mm_cvtepi8_epi16(vi9x01234567);
988         const __m128i vk9x01234567 = _mm_loadl_epi64((const __m128i*) (k + 216));
989         const __m128i vxk9x01234567 = _mm_cvtepi8_epi16(vk9x01234567);
990         i9 += 8;
991 
992 
993         vprod01234567 = _mm_mullo_epi16(vxi9x01234567, vxk9x01234567);
994 
995         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
996         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
997 
998         const __m128i vi10x01234567 = _mm_loadl_epi64((const __m128i*) i10);
999         const __m128i vxi10x01234567 = _mm_cvtepi8_epi16(vi10x01234567);
1000         const __m128i vk10x01234567 = _mm_loadl_epi64((const __m128i*) (k + 240));
1001         const __m128i vxk10x01234567 = _mm_cvtepi8_epi16(vk10x01234567);
1002         i10 += 8;
1003 
1004 
1005         vprod01234567 = _mm_mullo_epi16(vxi10x01234567, vxk10x01234567);
1006 
1007         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1008         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1009 
1010         const __m128i vi11x01234567 = _mm_loadl_epi64((const __m128i*) i11);
1011         const __m128i vxi11x01234567 = _mm_cvtepi8_epi16(vi11x01234567);
1012         const __m128i vk11x01234567 = _mm_loadl_epi64((const __m128i*) (k + 264));
1013         const __m128i vxk11x01234567 = _mm_cvtepi8_epi16(vk11x01234567);
1014         i11 += 8;
1015 
1016 
1017         vprod01234567 = _mm_mullo_epi16(vxi11x01234567, vxk11x01234567);
1018 
1019         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1020         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1021 
1022         const __m128i vi12x01234567 = _mm_loadl_epi64((const __m128i*) i12);
1023         const __m128i vxi12x01234567 = _mm_cvtepi8_epi16(vi12x01234567);
1024         const __m128i vk12x01234567 = _mm_loadl_epi64((const __m128i*) (k + 288));
1025         const __m128i vxk12x01234567 = _mm_cvtepi8_epi16(vk12x01234567);
1026         i12 += 8;
1027 
1028 
1029         vprod01234567 = _mm_mullo_epi16(vxi12x01234567, vxk12x01234567);
1030 
1031         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1032         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1033 
1034         const __m128i vi13x01234567 = _mm_loadl_epi64((const __m128i*) i13);
1035         const __m128i vxi13x01234567 = _mm_cvtepi8_epi16(vi13x01234567);
1036         const __m128i vk13x01234567 = _mm_loadl_epi64((const __m128i*) (k + 312));
1037         const __m128i vxk13x01234567 = _mm_cvtepi8_epi16(vk13x01234567);
1038         i13 += 8;
1039 
1040 
1041         vprod01234567 = _mm_mullo_epi16(vxi13x01234567, vxk13x01234567);
1042 
1043         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1044         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1045 
1046         const __m128i vi14x01234567 = _mm_loadl_epi64((const __m128i*) i14);
1047         const __m128i vxi14x01234567 = _mm_cvtepi8_epi16(vi14x01234567);
1048         const __m128i vk14x01234567 = _mm_loadl_epi64((const __m128i*) (k + 336));
1049         const __m128i vxk14x01234567 = _mm_cvtepi8_epi16(vk14x01234567);
1050         i14 += 8;
1051 
1052 
1053         vprod01234567 = _mm_mullo_epi16(vxi14x01234567, vxk14x01234567);
1054 
1055         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1056         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1057 
1058         const __m128i vi15x01234567 = _mm_loadl_epi64((const __m128i*) i15);
1059         const __m128i vxi15x01234567 = _mm_cvtepi8_epi16(vi15x01234567);
1060         const __m128i vk15x01234567 = _mm_loadl_epi64((const __m128i*) (k + 360));
1061         const __m128i vxk15x01234567 = _mm_cvtepi8_epi16(vk15x01234567);
1062         i15 += 8;
1063 
1064 
1065         vprod01234567 = _mm_mullo_epi16(vxi15x01234567, vxk15x01234567);
1066 
1067         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1068         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1069 
1070         const __m128i vi16x01234567 = _mm_loadl_epi64((const __m128i*) i16);
1071         const __m128i vxi16x01234567 = _mm_cvtepi8_epi16(vi16x01234567);
1072         const __m128i vk16x01234567 = _mm_loadl_epi64((const __m128i*) (k + 384));
1073         const __m128i vxk16x01234567 = _mm_cvtepi8_epi16(vk16x01234567);
1074         i16 += 8;
1075 
1076 
1077         vprod01234567 = _mm_mullo_epi16(vxi16x01234567, vxk16x01234567);
1078 
1079         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1080         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1081 
1082         const __m128i vi17x01234567 = _mm_loadl_epi64((const __m128i*) i17);
1083         const __m128i vxi17x01234567 = _mm_cvtepi8_epi16(vi17x01234567);
1084         const __m128i vk17x01234567 = _mm_loadl_epi64((const __m128i*) (k + 408));
1085         const __m128i vxk17x01234567 = _mm_cvtepi8_epi16(vk17x01234567);
1086         i17 += 8;
1087 
1088 
1089         vprod01234567 = _mm_mullo_epi16(vxi17x01234567, vxk17x01234567);
1090 
1091         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1092         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1093 
1094         const __m128i vi18x01234567 = _mm_loadl_epi64((const __m128i*) i18);
1095         const __m128i vxi18x01234567 = _mm_cvtepi8_epi16(vi18x01234567);
1096         const __m128i vk18x01234567 = _mm_loadl_epi64((const __m128i*) (k + 432));
1097         const __m128i vxk18x01234567 = _mm_cvtepi8_epi16(vk18x01234567);
1098         i18 += 8;
1099 
1100 
1101         vprod01234567 = _mm_mullo_epi16(vxi18x01234567, vxk18x01234567);
1102 
1103         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1104         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1105 
1106         const __m128i vi19x01234567 = _mm_loadl_epi64((const __m128i*) i19);
1107         const __m128i vxi19x01234567 = _mm_cvtepi8_epi16(vi19x01234567);
1108         const __m128i vk19x01234567 = _mm_loadl_epi64((const __m128i*) (k + 456));
1109         const __m128i vxk19x01234567 = _mm_cvtepi8_epi16(vk19x01234567);
1110         i19 += 8;
1111 
1112 
1113         vprod01234567 = _mm_mullo_epi16(vxi19x01234567, vxk19x01234567);
1114 
1115         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1116         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1117 
1118         const __m128i vi20x01234567 = _mm_loadl_epi64((const __m128i*) i20);
1119         const __m128i vxi20x01234567 = _mm_cvtepi8_epi16(vi20x01234567);
1120         const __m128i vk20x01234567 = _mm_loadl_epi64((const __m128i*) (k + 480));
1121         const __m128i vxk20x01234567 = _mm_cvtepi8_epi16(vk20x01234567);
1122         i20 += 8;
1123 
1124 
1125         vprod01234567 = _mm_mullo_epi16(vxi20x01234567, vxk20x01234567);
1126 
1127         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1128         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1129 
1130         const __m128i vi21x01234567 = _mm_loadl_epi64((const __m128i*) i21);
1131         const __m128i vxi21x01234567 = _mm_cvtepi8_epi16(vi21x01234567);
1132         const __m128i vk21x01234567 = _mm_loadl_epi64((const __m128i*) (k + 504));
1133         const __m128i vxk21x01234567 = _mm_cvtepi8_epi16(vk21x01234567);
1134         i21 += 8;
1135 
1136 
1137         vprod01234567 = _mm_mullo_epi16(vxi21x01234567, vxk21x01234567);
1138 
1139         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1140         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1141 
1142         const __m128i vi22x01234567 = _mm_loadl_epi64((const __m128i*) i22);
1143         const __m128i vxi22x01234567 = _mm_cvtepi8_epi16(vi22x01234567);
1144         const __m128i vk22x01234567 = _mm_loadl_epi64((const __m128i*) (k + 528));
1145         const __m128i vxk22x01234567 = _mm_cvtepi8_epi16(vk22x01234567);
1146         i22 += 8;
1147 
1148 
1149         vprod01234567 = _mm_mullo_epi16(vxi22x01234567, vxk22x01234567);
1150 
1151         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1152         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1153 
1154         const __m128i vi23x01234567 = _mm_loadl_epi64((const __m128i*) i23);
1155         const __m128i vxi23x01234567 = _mm_cvtepi8_epi16(vi23x01234567);
1156         const __m128i vk23x01234567 = _mm_loadl_epi64((const __m128i*) (k + 552));
1157         const __m128i vxk23x01234567 = _mm_cvtepi8_epi16(vk23x01234567);
1158         i23 += 8;
1159 
1160 
1161         vprod01234567 = _mm_mullo_epi16(vxi23x01234567, vxk23x01234567);
1162 
1163         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1164         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1165 
1166         const __m128i vi24x01234567 = _mm_loadl_epi64((const __m128i*) i24);
1167         const __m128i vxi24x01234567 = _mm_cvtepi8_epi16(vi24x01234567);
1168         const __m128i vk24x01234567 = _mm_loadl_epi64((const __m128i*) (k + 576));
1169         const __m128i vxk24x01234567 = _mm_cvtepi8_epi16(vk24x01234567);
1170         i24 += 8;
1171 
1172 
1173         vprod01234567 = _mm_mullo_epi16(vxi24x01234567, vxk24x01234567);
1174 
1175         vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
1176         vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));
1177 
1178         k += 8;
1179 
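        // Requantize this group exactly as in the main loop: convert to float,
        // scale, clamp from above in float, and round back to int32.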
1180         __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
1181         __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
1182 
1183         const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
1184         vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
1185         vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
1186 
1187         const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
1188         vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
1189         vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
1190 
1191         vacc0123 = _mm_cvtps_epi32(vscaled0123);
1192         vacc4567 = _mm_cvtps_epi32(vscaled4567);
1193 
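        // Advance the bias pointer past the 8 int32 biases consumed by this
        // group; the int8 kernel values are tracked separately through k.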
1194         w = (const void*) ((const int32_t*) w + 8);
1195 
1196         const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
1197         __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
1198 
1199 
1200         __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
1201 
1202         vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
1203 
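        // Store the group: a full 8-byte store when at least 8 channels
        // remain; otherwise write the final 1-7 outputs in 4-, 2- and 1-byte
        // pieces, shifting the already-written bytes out of the vector after
        // each partial store.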
1204         if XNN_LIKELY(c >= 8) {
1205           _mm_storel_epi64((__m128i*) output, vout0123456701234567);
1206           output += 8;
1207           c -= 8;
1208         } else {
1209           if (c & 4) {
1210             unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
1211             vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
1212             output += 4;
1213           }
1214           if (c & 2) {
1215             unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
1216             vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
1217             output += 2;
1218           }
1219           if (c & 1) {
1220             *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
1221             output += 1;
1222           }
1223           c = 0;
1224         }
1225       } while (c != 0);
1226     }
1227 
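    // Step to the next output pixel using the caller-provided increment.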
1228     output = (int8_t*) ((uintptr_t) output + output_increment);
1229   } while (--output_width != 0);
1230 }
1231