xref: /aosp_15_r20/external/XNNPACK/src/qs8-dwconv/gen/up24x25-minmax-fp32-sse41-mul32.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
1 // Auto-generated file. Do not edit!
2 //   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2020 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 
12 #include <immintrin.h>
13 
14 #include <xnnpack/dwconv.h>
15 #include <xnnpack/intrinsics-polyfill.h>
16 #include <xnnpack/unaligned.h>
17 
18 
xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__sse41_mul32(size_t channels,size_t output_width,const int8_t ** input,const void * weights,int8_t * output,size_t input_stride,size_t output_increment,size_t input_offset,const int8_t * zero,const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])19 void xnn_qs8_dwconv_minmax_fp32_ukernel_up24x25__sse41_mul32(
20     size_t channels,
21     size_t output_width,
22     const int8_t** input,
23     const void* weights,
24     int8_t* output,
25     size_t input_stride,
26     size_t output_increment,
27     size_t input_offset,
28     const int8_t* zero,
29     const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
30 {
31   assert(channels != 0);
32   assert(output_width != 0);
33 
34   do {
35     const int8_t* i0 = input[0];
36     assert(i0 != NULL);
37     if XNN_UNPREDICTABLE(i0 != zero) {
38       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
39     }
40     const int8_t* i1 = input[1];
41     assert(i1 != NULL);
42     if XNN_UNPREDICTABLE(i1 != zero) {
43       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
44     }
45     const int8_t* i2 = input[2];
46     assert(i2 != NULL);
47     if XNN_UNPREDICTABLE(i2 != zero) {
48       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
49     }
50     const int8_t* i3 = input[3];
51     assert(i3 != NULL);
52     if XNN_UNPREDICTABLE(i3 != zero) {
53       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
54     }
55     const int8_t* i4 = input[4];
56     assert(i4 != NULL);
57     if XNN_UNPREDICTABLE(i4 != zero) {
58       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
59     }
60     const int8_t* i5 = input[5];
61     assert(i5 != NULL);
62     if XNN_UNPREDICTABLE(i5 != zero) {
63       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
64     }
65     const int8_t* i6 = input[6];
66     assert(i6 != NULL);
67     if XNN_UNPREDICTABLE(i6 != zero) {
68       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
69     }
70     const int8_t* i7 = input[7];
71     assert(i7 != NULL);
72     if XNN_UNPREDICTABLE(i7 != zero) {
73       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
74     }
75     const int8_t* i8 = input[8];
76     assert(i8 != NULL);
77     if XNN_UNPREDICTABLE(i8 != zero) {
78       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
79     }
80     const int8_t* i9 = input[9];
81     assert(i9 != NULL);
82     if XNN_UNPREDICTABLE(i9 != zero) {
83       i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
84     }
85     const int8_t* i10 = input[10];
86     assert(i10 != NULL);
87     if XNN_UNPREDICTABLE(i10 != zero) {
88       i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
89     }
90     const int8_t* i11 = input[11];
91     assert(i11 != NULL);
92     if XNN_UNPREDICTABLE(i11 != zero) {
93       i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
94     }
95     const int8_t* i12 = input[12];
96     assert(i12 != NULL);
97     if XNN_UNPREDICTABLE(i12 != zero) {
98       i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
99     }
100     const int8_t* i13 = input[13];
101     assert(i13 != NULL);
102     if XNN_UNPREDICTABLE(i13 != zero) {
103       i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
104     }
105     const int8_t* i14 = input[14];
106     assert(i14 != NULL);
107     if XNN_UNPREDICTABLE(i14 != zero) {
108       i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
109     }
110     const int8_t* i15 = input[15];
111     assert(i15 != NULL);
112     if XNN_UNPREDICTABLE(i15 != zero) {
113       i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
114     }
115     const int8_t* i16 = input[16];
116     assert(i16 != NULL);
117     if XNN_UNPREDICTABLE(i16 != zero) {
118       i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
119     }
120     const int8_t* i17 = input[17];
121     assert(i17 != NULL);
122     if XNN_UNPREDICTABLE(i17 != zero) {
123       i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
124     }
125     const int8_t* i18 = input[18];
126     assert(i18 != NULL);
127     if XNN_UNPREDICTABLE(i18 != zero) {
128       i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
129     }
130     const int8_t* i19 = input[19];
131     assert(i19 != NULL);
132     if XNN_UNPREDICTABLE(i19 != zero) {
133       i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
134     }
135     const int8_t* i20 = input[20];
136     assert(i20 != NULL);
137     if XNN_UNPREDICTABLE(i20 != zero) {
138       i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
139     }
140     const int8_t* i21 = input[21];
141     assert(i21 != NULL);
142     if XNN_UNPREDICTABLE(i21 != zero) {
143       i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
144     }
145     const int8_t* i22 = input[22];
146     assert(i22 != NULL);
147     if XNN_UNPREDICTABLE(i22 != zero) {
148       i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
149     }
150     const int8_t* i23 = input[23];
151     assert(i23 != NULL);
152     if XNN_UNPREDICTABLE(i23 != zero) {
153       i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
154     }
155     const int8_t* i24 = input[24];
156     assert(i24 != NULL);
157     if XNN_UNPREDICTABLE(i24 != zero) {
158       i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
159     }
160     input = (const int8_t**) ((uintptr_t) input + input_stride);
161 
162     size_t c = channels;
163     const void* w = weights;
164     for (; c >= 24; c -= 24) {
165       __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
166       __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
167       __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
168       __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));
169       __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 16));
170       __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 20));
171 
172 
173       const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
174       const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)))));
175       const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
176       const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 4 * sizeof(int8_t)))));
177       const __m128i vi0x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 8)));
178       const __m128i vk0x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)))));
179       const __m128i vi0xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 12)));
180       const __m128i vk0xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 12 * sizeof(int8_t)))));
181       const __m128i vi0xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 16)));
182       const __m128i vk0xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)))));
183       const __m128i vi0xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 20)));
184       const __m128i vk0xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 20 * sizeof(int8_t)))));
185       i0 += 24;
186 
187       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
188       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));
189       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi0x89AB, vk0x89AB));
190       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi0xCDEF, vk0xCDEF));
191       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi0xGHIJ, vk0xGHIJ));
192       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi0xKLMN, vk0xKLMN));
193 
194       const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
195       const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)))));
196       const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
197       const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 28 * sizeof(int8_t)))));
198       const __m128i vi1x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 8)));
199       const __m128i vk1x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)))));
200       const __m128i vi1xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 12)));
201       const __m128i vk1xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 36 * sizeof(int8_t)))));
202       const __m128i vi1xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 16)));
203       const __m128i vk1xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)))));
204       const __m128i vi1xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 20)));
205       const __m128i vk1xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 44 * sizeof(int8_t)))));
206       i1 += 24;
207 
208       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
209       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));
210       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi1x89AB, vk1x89AB));
211       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi1xCDEF, vk1xCDEF));
212       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi1xGHIJ, vk1xGHIJ));
213       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi1xKLMN, vk1xKLMN));
214 
215       const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
216       const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)))));
217       const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
218       const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 52 * sizeof(int8_t)))));
219       const __m128i vi2x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 8)));
220       const __m128i vk2x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)))));
221       const __m128i vi2xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 12)));
222       const __m128i vk2xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 60 * sizeof(int8_t)))));
223       const __m128i vi2xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 16)));
224       const __m128i vk2xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)))));
225       const __m128i vi2xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 20)));
226       const __m128i vk2xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 68 * sizeof(int8_t)))));
227       i2 += 24;
228 
229       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
230       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));
231       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi2x89AB, vk2x89AB));
232       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi2xCDEF, vk2xCDEF));
233       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi2xGHIJ, vk2xGHIJ));
234       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi2xKLMN, vk2xKLMN));
235 
236       const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
237       const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)))));
238       const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
239       const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 76 * sizeof(int8_t)))));
240       const __m128i vi3x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 8)));
241       const __m128i vk3x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)))));
242       const __m128i vi3xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 12)));
243       const __m128i vk3xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 84 * sizeof(int8_t)))));
244       const __m128i vi3xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 16)));
245       const __m128i vk3xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)))));
246       const __m128i vi3xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 20)));
247       const __m128i vk3xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 92 * sizeof(int8_t)))));
248       i3 += 24;
249 
250       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
251       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));
252       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi3x89AB, vk3x89AB));
253       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi3xCDEF, vk3xCDEF));
254       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi3xGHIJ, vk3xGHIJ));
255       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi3xKLMN, vk3xKLMN));
256 
257       const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
258       const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)))));
259       const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
260       const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 100 * sizeof(int8_t)))));
261       const __m128i vi4x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 8)));
262       const __m128i vk4x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)))));
263       const __m128i vi4xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 12)));
264       const __m128i vk4xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 108 * sizeof(int8_t)))));
265       const __m128i vi4xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 16)));
266       const __m128i vk4xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)))));
267       const __m128i vi4xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 20)));
268       const __m128i vk4xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 116 * sizeof(int8_t)))));
269       i4 += 24;
270 
271       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
272       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));
273       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi4x89AB, vk4x89AB));
274       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi4xCDEF, vk4xCDEF));
275       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi4xGHIJ, vk4xGHIJ));
276       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi4xKLMN, vk4xKLMN));
277 
278       const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
279       const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)))));
280       const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
281       const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 124 * sizeof(int8_t)))));
282       const __m128i vi5x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 8)));
283       const __m128i vk5x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)))));
284       const __m128i vi5xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 12)));
285       const __m128i vk5xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 132 * sizeof(int8_t)))));
286       const __m128i vi5xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 16)));
287       const __m128i vk5xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)))));
288       const __m128i vi5xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 20)));
289       const __m128i vk5xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 140 * sizeof(int8_t)))));
290       i5 += 24;
291 
292       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
293       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));
294       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi5x89AB, vk5x89AB));
295       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi5xCDEF, vk5xCDEF));
296       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi5xGHIJ, vk5xGHIJ));
297       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi5xKLMN, vk5xKLMN));
298 
299       const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
300       const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)))));
301       const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
302       const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 148 * sizeof(int8_t)))));
303       const __m128i vi6x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 8)));
304       const __m128i vk6x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)))));
305       const __m128i vi6xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 12)));
306       const __m128i vk6xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 156 * sizeof(int8_t)))));
307       const __m128i vi6xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 16)));
308       const __m128i vk6xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)))));
309       const __m128i vi6xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 20)));
310       const __m128i vk6xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 164 * sizeof(int8_t)))));
311       i6 += 24;
312 
313       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
314       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));
315       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi6x89AB, vk6x89AB));
316       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi6xCDEF, vk6xCDEF));
317       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi6xGHIJ, vk6xGHIJ));
318       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi6xKLMN, vk6xKLMN));
319 
320       const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
321       const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)))));
322       const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
323       const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 172 * sizeof(int8_t)))));
324       const __m128i vi7x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 8)));
325       const __m128i vk7x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)))));
326       const __m128i vi7xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 12)));
327       const __m128i vk7xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 180 * sizeof(int8_t)))));
328       const __m128i vi7xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 16)));
329       const __m128i vk7xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)))));
330       const __m128i vi7xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 20)));
331       const __m128i vk7xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 188 * sizeof(int8_t)))));
332       i7 += 24;
333 
334       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
335       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));
336       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi7x89AB, vk7x89AB));
337       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi7xCDEF, vk7xCDEF));
338       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi7xGHIJ, vk7xGHIJ));
339       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi7xKLMN, vk7xKLMN));
340 
341       const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
342       const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)))));
343       const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
344       const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 196 * sizeof(int8_t)))));
345       const __m128i vi8x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 8)));
346       const __m128i vk8x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)))));
347       const __m128i vi8xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 12)));
348       const __m128i vk8xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 204 * sizeof(int8_t)))));
349       const __m128i vi8xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 16)));
350       const __m128i vk8xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)))));
351       const __m128i vi8xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 20)));
352       const __m128i vk8xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 212 * sizeof(int8_t)))));
353       i8 += 24;
354 
355       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
356       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));
357       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi8x89AB, vk8x89AB));
358       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi8xCDEF, vk8xCDEF));
359       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi8xGHIJ, vk8xGHIJ));
360       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi8xKLMN, vk8xKLMN));
361 
362       const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
363       const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t)))));
364       const __m128i vi9x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 4)));
365       const __m128i vk9x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 220 * sizeof(int8_t)))));
366       const __m128i vi9x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 8)));
367       const __m128i vk9x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 224 * sizeof(int8_t)))));
368       const __m128i vi9xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 12)));
369       const __m128i vk9xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 228 * sizeof(int8_t)))));
370       const __m128i vi9xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 16)));
371       const __m128i vk9xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 232 * sizeof(int8_t)))));
372       const __m128i vi9xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 20)));
373       const __m128i vk9xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 236 * sizeof(int8_t)))));
374       i9 += 24;
375 
376       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
377       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi9x4567, vk9x4567));
378       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi9x89AB, vk9x89AB));
379       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi9xCDEF, vk9xCDEF));
380       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi9xGHIJ, vk9xGHIJ));
381       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi9xKLMN, vk9xKLMN));
382 
383       const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
384       const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 240 * sizeof(int8_t)))));
385       const __m128i vi10x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 4)));
386       const __m128i vk10x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 244 * sizeof(int8_t)))));
387       const __m128i vi10x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 8)));
388       const __m128i vk10x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 248 * sizeof(int8_t)))));
389       const __m128i vi10xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 12)));
390       const __m128i vk10xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 252 * sizeof(int8_t)))));
391       const __m128i vi10xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 16)));
392       const __m128i vk10xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 256 * sizeof(int8_t)))));
393       const __m128i vi10xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 20)));
394       const __m128i vk10xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 260 * sizeof(int8_t)))));
395       i10 += 24;
396 
397       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
398       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi10x4567, vk10x4567));
399       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi10x89AB, vk10x89AB));
400       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi10xCDEF, vk10xCDEF));
401       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi10xGHIJ, vk10xGHIJ));
402       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi10xKLMN, vk10xKLMN));
403 
404       const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
405       const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 264 * sizeof(int8_t)))));
406       const __m128i vi11x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 4)));
407       const __m128i vk11x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 268 * sizeof(int8_t)))));
408       const __m128i vi11x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 8)));
409       const __m128i vk11x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 272 * sizeof(int8_t)))));
410       const __m128i vi11xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 12)));
411       const __m128i vk11xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 276 * sizeof(int8_t)))));
412       const __m128i vi11xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 16)));
413       const __m128i vk11xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 280 * sizeof(int8_t)))));
414       const __m128i vi11xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 20)));
415       const __m128i vk11xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 284 * sizeof(int8_t)))));
416       i11 += 24;
417 
418       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
419       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi11x4567, vk11x4567));
420       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi11x89AB, vk11x89AB));
421       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi11xCDEF, vk11xCDEF));
422       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi11xGHIJ, vk11xGHIJ));
423       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi11xKLMN, vk11xKLMN));
424 
425       const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
426       const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 288 * sizeof(int8_t)))));
427       const __m128i vi12x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 4)));
428       const __m128i vk12x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 292 * sizeof(int8_t)))));
429       const __m128i vi12x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 8)));
430       const __m128i vk12x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 296 * sizeof(int8_t)))));
431       const __m128i vi12xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 12)));
432       const __m128i vk12xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 300 * sizeof(int8_t)))));
433       const __m128i vi12xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 16)));
434       const __m128i vk12xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 304 * sizeof(int8_t)))));
435       const __m128i vi12xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 20)));
436       const __m128i vk12xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 308 * sizeof(int8_t)))));
437       i12 += 24;
438 
439       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
440       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi12x4567, vk12x4567));
441       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi12x89AB, vk12x89AB));
442       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi12xCDEF, vk12xCDEF));
443       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi12xGHIJ, vk12xGHIJ));
444       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi12xKLMN, vk12xKLMN));
445 
446       const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
447       const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 312 * sizeof(int8_t)))));
448       const __m128i vi13x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 4)));
449       const __m128i vk13x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 316 * sizeof(int8_t)))));
450       const __m128i vi13x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 8)));
451       const __m128i vk13x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 320 * sizeof(int8_t)))));
452       const __m128i vi13xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 12)));
453       const __m128i vk13xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 324 * sizeof(int8_t)))));
454       const __m128i vi13xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 16)));
455       const __m128i vk13xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 328 * sizeof(int8_t)))));
456       const __m128i vi13xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 20)));
457       const __m128i vk13xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 332 * sizeof(int8_t)))));
458       i13 += 24;
459 
460       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
461       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi13x4567, vk13x4567));
462       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi13x89AB, vk13x89AB));
463       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi13xCDEF, vk13xCDEF));
464       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi13xGHIJ, vk13xGHIJ));
465       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi13xKLMN, vk13xKLMN));
466 
467       const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
468       const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 336 * sizeof(int8_t)))));
469       const __m128i vi14x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 4)));
470       const __m128i vk14x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 340 * sizeof(int8_t)))));
471       const __m128i vi14x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 8)));
472       const __m128i vk14x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 344 * sizeof(int8_t)))));
473       const __m128i vi14xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 12)));
474       const __m128i vk14xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 348 * sizeof(int8_t)))));
475       const __m128i vi14xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 16)));
476       const __m128i vk14xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 352 * sizeof(int8_t)))));
477       const __m128i vi14xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 20)));
478       const __m128i vk14xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 356 * sizeof(int8_t)))));
479       i14 += 24;
480 
481       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
482       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi14x4567, vk14x4567));
483       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi14x89AB, vk14x89AB));
484       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi14xCDEF, vk14xCDEF));
485       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi14xGHIJ, vk14xGHIJ));
486       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi14xKLMN, vk14xKLMN));
487 
488       const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
489       const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 360 * sizeof(int8_t)))));
490       const __m128i vi15x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 4)));
491       const __m128i vk15x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 364 * sizeof(int8_t)))));
492       const __m128i vi15x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 8)));
493       const __m128i vk15x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 368 * sizeof(int8_t)))));
494       const __m128i vi15xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 12)));
495       const __m128i vk15xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 372 * sizeof(int8_t)))));
496       const __m128i vi15xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 16)));
497       const __m128i vk15xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 376 * sizeof(int8_t)))));
498       const __m128i vi15xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 20)));
499       const __m128i vk15xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 380 * sizeof(int8_t)))));
500       i15 += 24;
501 
502       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
503       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi15x4567, vk15x4567));
504       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi15x89AB, vk15x89AB));
505       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi15xCDEF, vk15xCDEF));
506       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi15xGHIJ, vk15xGHIJ));
507       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi15xKLMN, vk15xKLMN));
508 
509       const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
510       const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 384 * sizeof(int8_t)))));
511       const __m128i vi16x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 4)));
512       const __m128i vk16x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 388 * sizeof(int8_t)))));
513       const __m128i vi16x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 8)));
514       const __m128i vk16x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 392 * sizeof(int8_t)))));
515       const __m128i vi16xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 12)));
516       const __m128i vk16xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 396 * sizeof(int8_t)))));
517       const __m128i vi16xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 16)));
518       const __m128i vk16xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 400 * sizeof(int8_t)))));
519       const __m128i vi16xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 20)));
520       const __m128i vk16xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 404 * sizeof(int8_t)))));
521       i16 += 24;
522 
523       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
524       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi16x4567, vk16x4567));
525       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi16x89AB, vk16x89AB));
526       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi16xCDEF, vk16xCDEF));
527       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi16xGHIJ, vk16xGHIJ));
528       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi16xKLMN, vk16xKLMN));
529 
530       const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
531       const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 408 * sizeof(int8_t)))));
532       const __m128i vi17x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 4)));
533       const __m128i vk17x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 412 * sizeof(int8_t)))));
534       const __m128i vi17x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 8)));
535       const __m128i vk17x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 416 * sizeof(int8_t)))));
536       const __m128i vi17xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 12)));
537       const __m128i vk17xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 420 * sizeof(int8_t)))));
538       const __m128i vi17xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 16)));
539       const __m128i vk17xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 424 * sizeof(int8_t)))));
540       const __m128i vi17xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 20)));
541       const __m128i vk17xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 428 * sizeof(int8_t)))));
542       i17 += 24;
543 
544       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
545       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi17x4567, vk17x4567));
546       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi17x89AB, vk17x89AB));
547       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi17xCDEF, vk17xCDEF));
548       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi17xGHIJ, vk17xGHIJ));
549       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi17xKLMN, vk17xKLMN));
550 
551       const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
552       const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 432 * sizeof(int8_t)))));
553       const __m128i vi18x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 4)));
554       const __m128i vk18x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 436 * sizeof(int8_t)))));
555       const __m128i vi18x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 8)));
556       const __m128i vk18x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 440 * sizeof(int8_t)))));
557       const __m128i vi18xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 12)));
558       const __m128i vk18xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 444 * sizeof(int8_t)))));
559       const __m128i vi18xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 16)));
560       const __m128i vk18xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 448 * sizeof(int8_t)))));
561       const __m128i vi18xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 20)));
562       const __m128i vk18xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 452 * sizeof(int8_t)))));
563       i18 += 24;
564 
565       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
566       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi18x4567, vk18x4567));
567       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi18x89AB, vk18x89AB));
568       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi18xCDEF, vk18xCDEF));
569       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi18xGHIJ, vk18xGHIJ));
570       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi18xKLMN, vk18xKLMN));
571 
572       const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
573       const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 456 * sizeof(int8_t)))));
574       const __m128i vi19x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 4)));
575       const __m128i vk19x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 460 * sizeof(int8_t)))));
576       const __m128i vi19x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 8)));
577       const __m128i vk19x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 464 * sizeof(int8_t)))));
578       const __m128i vi19xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 12)));
579       const __m128i vk19xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 468 * sizeof(int8_t)))));
580       const __m128i vi19xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 16)));
581       const __m128i vk19xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 472 * sizeof(int8_t)))));
582       const __m128i vi19xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 20)));
583       const __m128i vk19xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 476 * sizeof(int8_t)))));
584       i19 += 24;
585 
586       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
587       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi19x4567, vk19x4567));
588       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi19x89AB, vk19x89AB));
589       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi19xCDEF, vk19xCDEF));
590       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi19xGHIJ, vk19xGHIJ));
591       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi19xKLMN, vk19xKLMN));
592 
593       const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
594       const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 480 * sizeof(int8_t)))));
595       const __m128i vi20x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 4)));
596       const __m128i vk20x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 484 * sizeof(int8_t)))));
597       const __m128i vi20x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 8)));
598       const __m128i vk20x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 488 * sizeof(int8_t)))));
599       const __m128i vi20xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 12)));
600       const __m128i vk20xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 492 * sizeof(int8_t)))));
601       const __m128i vi20xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 16)));
602       const __m128i vk20xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 496 * sizeof(int8_t)))));
603       const __m128i vi20xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 20)));
604       const __m128i vk20xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 500 * sizeof(int8_t)))));
605       i20 += 24;
606 
607       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
608       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi20x4567, vk20x4567));
609       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi20x89AB, vk20x89AB));
610       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi20xCDEF, vk20xCDEF));
611       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi20xGHIJ, vk20xGHIJ));
612       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi20xKLMN, vk20xKLMN));
613 
614       const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
615       const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 504 * sizeof(int8_t)))));
616       const __m128i vi21x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 4)));
617       const __m128i vk21x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 508 * sizeof(int8_t)))));
618       const __m128i vi21x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 8)));
619       const __m128i vk21x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 512 * sizeof(int8_t)))));
620       const __m128i vi21xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 12)));
621       const __m128i vk21xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 516 * sizeof(int8_t)))));
622       const __m128i vi21xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 16)));
623       const __m128i vk21xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 520 * sizeof(int8_t)))));
624       const __m128i vi21xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 20)));
625       const __m128i vk21xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 524 * sizeof(int8_t)))));
626       i21 += 24;
627 
628       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
629       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi21x4567, vk21x4567));
630       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi21x89AB, vk21x89AB));
631       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi21xCDEF, vk21xCDEF));
632       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi21xGHIJ, vk21xGHIJ));
633       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi21xKLMN, vk21xKLMN));
634 
635       const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
636       const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 528 * sizeof(int8_t)))));
637       const __m128i vi22x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 4)));
638       const __m128i vk22x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 532 * sizeof(int8_t)))));
639       const __m128i vi22x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 8)));
640       const __m128i vk22x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 536 * sizeof(int8_t)))));
641       const __m128i vi22xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 12)));
642       const __m128i vk22xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 540 * sizeof(int8_t)))));
643       const __m128i vi22xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 16)));
644       const __m128i vk22xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 544 * sizeof(int8_t)))));
645       const __m128i vi22xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 20)));
646       const __m128i vk22xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 548 * sizeof(int8_t)))));
647       i22 += 24;
648 
649       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
650       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi22x4567, vk22x4567));
651       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi22x89AB, vk22x89AB));
652       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi22xCDEF, vk22xCDEF));
653       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi22xGHIJ, vk22xGHIJ));
654       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi22xKLMN, vk22xKLMN));
655 
656       const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
657       const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 552 * sizeof(int8_t)))));
658       const __m128i vi23x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 4)));
659       const __m128i vk23x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 556 * sizeof(int8_t)))));
660       const __m128i vi23x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 8)));
661       const __m128i vk23x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 560 * sizeof(int8_t)))));
662       const __m128i vi23xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 12)));
663       const __m128i vk23xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 564 * sizeof(int8_t)))));
664       const __m128i vi23xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 16)));
665       const __m128i vk23xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 568 * sizeof(int8_t)))));
666       const __m128i vi23xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 20)));
667       const __m128i vk23xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 572 * sizeof(int8_t)))));
668       i23 += 24;
669 
670       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
671       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi23x4567, vk23x4567));
672       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi23x89AB, vk23x89AB));
673       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi23xCDEF, vk23xCDEF));
674       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi23xGHIJ, vk23xGHIJ));
675       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi23xKLMN, vk23xKLMN));
676 
677       const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
678       const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 576 * sizeof(int8_t)))));
679       const __m128i vi24x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 4)));
680       const __m128i vk24x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 580 * sizeof(int8_t)))));
681       const __m128i vi24x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 8)));
682       const __m128i vk24x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 584 * sizeof(int8_t)))));
683       const __m128i vi24xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 12)));
684       const __m128i vk24xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 588 * sizeof(int8_t)))));
685       const __m128i vi24xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 16)));
686       const __m128i vk24xGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 592 * sizeof(int8_t)))));
687       const __m128i vi24xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 20)));
688       const __m128i vk24xKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 24 * sizeof(int32_t) + 596 * sizeof(int8_t)))));
689       i24 += 24;
690 
691       vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
692       vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi24x4567, vk24x4567));
693       vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi24x89AB, vk24x89AB));
694       vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi24xCDEF, vk24xCDEF));
695       vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_mullo_epi32(vi24xGHIJ, vk24xGHIJ));
696       vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_mullo_epi32(vi24xKLMN, vk24xKLMN));
697 
698       w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 600 * sizeof(int8_t));
699 
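      // Requantization: convert the 24 int32 accumulators to float, multiply by the
      // fp32 scale from params, and clamp against (output_max - output_zero_point)
      // before _mm_cvtps_epi32 rounds the results back to int32.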
700       __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
701       __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
702       __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
703       __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);
704       __m128 vscaledGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
705       __m128 vscaledKLMN = _mm_cvtepi32_ps(vaccKLMN);
706 
707       const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
708       vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
709       vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
710       vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
711       vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);
712       vscaledGHIJ = _mm_mul_ps(vscaledGHIJ, vscale);
713       vscaledKLMN = _mm_mul_ps(vscaledKLMN, vscale);
714 
715       const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
716       vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
717       vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
718       vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
719       vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);
720       vscaledGHIJ = _mm_min_ps(vscaledGHIJ, voutput_max_less_zero_point);
721       vscaledKLMN = _mm_min_ps(vscaledKLMN, voutput_max_less_zero_point);
722 
723       vacc0123 = _mm_cvtps_epi32(vscaled0123);
724       vacc4567 = _mm_cvtps_epi32(vscaled4567);
725       vacc89AB = _mm_cvtps_epi32(vscaled89AB);
726       vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);
727       vaccGHIJ = _mm_cvtps_epi32(vscaledGHIJ);
728       vaccKLMN = _mm_cvtps_epi32(vscaledKLMN);
729 
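      // Pack the rounded accumulators to int16 with a saturating add of the output
      // zero point, then pack to int8 and apply the output_min clamp.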
730       const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
731       __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
732       __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
733       __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
734 
735       const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
736       __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
737       vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
738       __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
739       voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
740 
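      // Store all 24 output values: 16 bytes, then the low 8 bytes of the second vector.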
741       _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
742       _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
743       output += 24;
744     }
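    // Remainder path: process the last 1-23 channels in groups of 4. Biases are read
    // from w and kernel taps from k with a stride of 24 bytes (the channel tile).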
745     if XNN_UNLIKELY(c != 0) {
746       const int8_t* k = (const int8_t*) ((const int32_t*) w + 24);
747       do {
748         __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
749 
750         const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
751         const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) k)));
752         i0 += 4;
753 
754         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
755         const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
756         const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24))));
757         i1 += 4;
758 
759         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
760         const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
761         const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48))));
762         i2 += 4;
763 
764         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
765         const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
766         const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 72))));
767         i3 += 4;
768 
769         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
770         const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
771         const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 96))));
772         i4 += 4;
773 
774         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
775         const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
776         const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 120))));
777         i5 += 4;
778 
779         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
780         const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
781         const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 144))));
782         i6 += 4;
783 
784         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
785         const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
786         const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 168))));
787         i7 += 4;
788 
789         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
790         const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
791         const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 192))));
792         i8 += 4;
793 
794         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
795         const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
796         const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 216))));
797         i9 += 4;
798 
799         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
800         const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
801         const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 240))));
802         i10 += 4;
803 
804         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
805         const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
806         const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 264))));
807         i11 += 4;
808 
809         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
810         const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
811         const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 288))));
812         i12 += 4;
813 
814         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
815         const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
816         const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 312))));
817         i13 += 4;
818 
819         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
820         const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
821         const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 336))));
822         i14 += 4;
823 
824         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
825         const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
826         const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 360))));
827         i15 += 4;
828 
829         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
830         const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
831         const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 384))));
832         i16 += 4;
833 
834         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
835         const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
836         const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 408))));
837         i17 += 4;
838 
839         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
840         const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
841         const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 432))));
842         i18 += 4;
843 
844         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
845         const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
846         const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 456))));
847         i19 += 4;
848 
849         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
850         const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
851         const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 480))));
852         i20 += 4;
853 
854         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
855         const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
856         const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 504))));
857         i21 += 4;
858 
859         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
860         const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
861         const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 528))));
862         i22 += 4;
863 
864         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
865         const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
866         const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 552))));
867         i23 += 4;
868 
869         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
870         const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
871         const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 576))));
872         i24 += 4;
873 
874         vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
875 
876         k += 4;
877 
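        // Requantize this group of 4 channels the same way as the main loop.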
878         __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
879         vscaled0123 = _mm_mul_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.scale));
880         vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.output_max_less_zero_point));
881         vacc0123 = _mm_cvtps_epi32(vscaled0123);
882 
883         w = (const void*) ((const int32_t*) w + 4);
884 
885         const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
886         __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);
887 
888         vout0123 = _mm_packs_epi16(vout0123, vout0123);
889         vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
890 
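        // Store 4 output bytes when at least 4 channels remain; otherwise store the
        // final 1-3 bytes via 16-bit and 8-bit extracts.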
891         if XNN_LIKELY(c >= 4) {
892           _mm_storeu_si32(output, vout0123);
893           output += 4;
894           c -= 4;
895         } else {
896           if (c & 2) {
897             unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
898             vout0123 = _mm_srli_epi32(vout0123, 16);
899             output += 2;
900           }
901           if (c & 1) {
902             *output = (int8_t) _mm_extract_epi8(vout0123, 0);
903             output += 1;
904           }
905           c = 0;
906         }
907       } while (c != 0);
908     }
909 
910     output = (int8_t*) ((uintptr_t) output + output_increment);
911   } while (--output_width != 0);
912 }
913