// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>

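// Depthwise convolution microkernel for QU8 (unsigned 8-bit quantized) data:
// 25 kernel taps (e.g. a 5x5 window), single pass, up to 8 channels per main-loop
// iteration ("up8x25"), using the SSE4.1 _mm_mullo_epi32 32-bit multiply and
// float (fp32) arithmetic for requantization. unaligned_load_s32() reads 4 bytes
// at a time, so the kernel may read slightly past the end of a row (XNN_OOB_READS).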
void xnn_qu8_dwconv_minmax_fp32_ukernel_up8x25__sse41_mul32(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

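  // Load the kernel zero point (replicated in the params structure) and widen it to
  // 32 bits; it is subtracted from every kernel tap before the multiply.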
  const __m128i vk_zero_point = _mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i*) params->fp32_sse2.kernel_zero_point));
  do {
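    // Set up the 25 input row pointers for this output pixel. Pointers that refer to
    // the shared `zero` (padding) buffer are left unchanged; all others are adjusted
    // by input_offset.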
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    const uint8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
    }
    const uint8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
    }
    const uint8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
    }
    const uint8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
    }
    const uint8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
    }
    const uint8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
    }
    const uint8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
    }
    const uint8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
    }
    const uint8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
    }
    const uint8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
    }
    const uint8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
    }
    const uint8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
    }
    const uint8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
    }
    const uint8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
    }
    const uint8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
    }
    const uint8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
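    // Main loop: process 8 channels per iteration. For each group of 8 channels the
    // packed weights hold 8 int32 bias values followed by 25 taps x 8 uint8 kernel
    // values (8*sizeof(int32_t) + 200 bytes in total).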
    for (; c >= 8; c -= 8) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));


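      // For each tap: load 8 input bytes and 8 kernel bytes, widen both to two int32x4
      // vectors, subtract the kernel zero point from the kernel values, and accumulate
      // the 32-bit products into vacc0123/vacc4567.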
      const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi0x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(uint8_t))))), vk_zero_point);
      i0 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));

      const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi1x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(uint8_t))))), vk_zero_point);
      i1 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));

      const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi2x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(uint8_t))))), vk_zero_point);
      i2 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));

      const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi3x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(uint8_t))))), vk_zero_point);
      i3 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));

      const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi4x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(uint8_t))))), vk_zero_point);
      i4 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));

      const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi5x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(uint8_t))))), vk_zero_point);
      i5 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));

      const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi6x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(uint8_t))))), vk_zero_point);
      i6 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));

      const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi7x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(uint8_t))))), vk_zero_point);
      i7 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));

      const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi8x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(uint8_t))))), vk_zero_point);
      i8 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));

      const __m128i vi9x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
      const __m128i vk9x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi9x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 4)));
      const __m128i vk9x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 76 * sizeof(uint8_t))))), vk_zero_point);
      i9 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi9x4567, vk9x4567));

      const __m128i vi10x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
      const __m128i vk10x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi10x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 4)));
      const __m128i vk10x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 84 * sizeof(uint8_t))))), vk_zero_point);
      i10 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi10x4567, vk10x4567));

      const __m128i vi11x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
      const __m128i vk11x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi11x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 4)));
      const __m128i vk11x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 92 * sizeof(uint8_t))))), vk_zero_point);
      i11 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi11x4567, vk11x4567));

      const __m128i vi12x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
      const __m128i vk12x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi12x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 4)));
      const __m128i vk12x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 100 * sizeof(uint8_t))))), vk_zero_point);
      i12 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi12x4567, vk12x4567));

      const __m128i vi13x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
      const __m128i vk13x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi13x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 4)));
      const __m128i vk13x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 108 * sizeof(uint8_t))))), vk_zero_point);
      i13 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi13x4567, vk13x4567));

      const __m128i vi14x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
      const __m128i vk14x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi14x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 4)));
      const __m128i vk14x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 116 * sizeof(uint8_t))))), vk_zero_point);
      i14 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi14x4567, vk14x4567));

      const __m128i vi15x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
      const __m128i vk15x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi15x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 4)));
      const __m128i vk15x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 124 * sizeof(uint8_t))))), vk_zero_point);
      i15 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi15x4567, vk15x4567));

      const __m128i vi16x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
      const __m128i vk16x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi16x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 4)));
      const __m128i vk16x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 132 * sizeof(uint8_t))))), vk_zero_point);
      i16 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi16x4567, vk16x4567));

      const __m128i vi17x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
      const __m128i vk17x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi17x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 4)));
      const __m128i vk17x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 140 * sizeof(uint8_t))))), vk_zero_point);
      i17 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi17x4567, vk17x4567));

      const __m128i vi18x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
      const __m128i vk18x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi18x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 4)));
      const __m128i vk18x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 148 * sizeof(uint8_t))))), vk_zero_point);
      i18 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi18x4567, vk18x4567));

      const __m128i vi19x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
      const __m128i vk19x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi19x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 4)));
      const __m128i vk19x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 156 * sizeof(uint8_t))))), vk_zero_point);
      i19 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi19x4567, vk19x4567));

      const __m128i vi20x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
      const __m128i vk20x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi20x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 4)));
      const __m128i vk20x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 164 * sizeof(uint8_t))))), vk_zero_point);
      i20 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi20x4567, vk20x4567));

      const __m128i vi21x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
      const __m128i vk21x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi21x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 4)));
      const __m128i vk21x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 172 * sizeof(uint8_t))))), vk_zero_point);
      i21 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi21x4567, vk21x4567));

      const __m128i vi22x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
      const __m128i vk22x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi22x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 4)));
      const __m128i vk22x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 180 * sizeof(uint8_t))))), vk_zero_point);
      i22 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi22x4567, vk22x4567));

      const __m128i vi23x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
      const __m128i vk23x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi23x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 4)));
      const __m128i vk23x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 188 * sizeof(uint8_t))))), vk_zero_point);
      i23 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi23x4567, vk23x4567));

      const __m128i vi24x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
      const __m128i vk24x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi24x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 4)));
      const __m128i vk24x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 196 * sizeof(uint8_t))))), vk_zero_point);
      i24 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi24x4567, vk24x4567));

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(uint8_t));

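      // Requantize: convert the int32 accumulators to float, scale, clamp to the upper
      // bound (output_max - output_zero_point), round back to int32, add the output
      // zero point with signed saturation, then pack to uint8 and clamp to output_min.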
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
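    // Remainder: 1-7 channels are left. They are processed in groups of up to 4;
    // `k` walks the uint8 kernel taps, which keep their stride of 8 bytes per tap
    // within the current weight group.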
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 8);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) k))), vk_zero_point);
        i0 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
        const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 8)))), vk_zero_point);
        i1 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
        const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16)))), vk_zero_point);
        i2 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
        const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24)))), vk_zero_point);
        i3 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
        const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32)))), vk_zero_point);
        i4 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
        const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 40)))), vk_zero_point);
        i5 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
        const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48)))), vk_zero_point);
        i6 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
        const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 56)))), vk_zero_point);
        i7 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
        const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64)))), vk_zero_point);
        i8 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
        const __m128i vi9x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
        const __m128i vk9x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 72)))), vk_zero_point);
        i9 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
        const __m128i vi10x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
        const __m128i vk10x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 80)))), vk_zero_point);
        i10 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
        const __m128i vi11x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
        const __m128i vk11x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 88)))), vk_zero_point);
        i11 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
        const __m128i vi12x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
        const __m128i vk12x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 96)))), vk_zero_point);
        i12 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
        const __m128i vi13x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
        const __m128i vk13x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 104)))), vk_zero_point);
        i13 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
        const __m128i vi14x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
        const __m128i vk14x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 112)))), vk_zero_point);
        i14 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
        const __m128i vi15x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
        const __m128i vk15x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 120)))), vk_zero_point);
        i15 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
        const __m128i vi16x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
        const __m128i vk16x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 128)))), vk_zero_point);
        i16 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
        const __m128i vi17x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
        const __m128i vk17x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 136)))), vk_zero_point);
        i17 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
        const __m128i vi18x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
        const __m128i vk18x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 144)))), vk_zero_point);
        i18 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
        const __m128i vi19x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
        const __m128i vk19x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 152)))), vk_zero_point);
        i19 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
        const __m128i vi20x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
        const __m128i vk20x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 160)))), vk_zero_point);
        i20 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
        const __m128i vi21x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
        const __m128i vk21x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 168)))), vk_zero_point);
        i21 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
        const __m128i vi22x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
        const __m128i vk22x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 176)))), vk_zero_point);
        i22 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
        const __m128i vi23x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
        const __m128i vk23x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 184)))), vk_zero_point);
        i23 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
        const __m128i vi24x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
        const __m128i vk24x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 192)))), vk_zero_point);
        i24 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));

        k += 4;

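        // Requantize the partial group exactly as in the main loop.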
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        vscaled0123 = _mm_mul_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.scale));
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packus_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epu8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

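        // Store 4 output bytes if at least 4 channels remain; otherwise store the
        // final 1-3 bytes with 16-bit and 8-bit extracts.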
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (uint8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
589