// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-sse-mul32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>


void xnn_qu8_dwconv_minmax_fp32_ukernel_up16x25__sse41_mul32(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

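  // Load the kernel zero point (16-bit values in the params struct) widened to 32-bit lanes.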
  const __m128i vk_zero_point = _mm_cvtepu16_epi32(_mm_loadl_epi64((const __m128i*) params->fp32_sse2.kernel_zero_point));
  do {
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    const uint8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const uint8_t*) ((uintptr_t) i9 + input_offset);
    }
    const uint8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const uint8_t*) ((uintptr_t) i10 + input_offset);
    }
    const uint8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const uint8_t*) ((uintptr_t) i11 + input_offset);
    }
    const uint8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const uint8_t*) ((uintptr_t) i12 + input_offset);
    }
    const uint8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const uint8_t*) ((uintptr_t) i13 + input_offset);
    }
    const uint8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const uint8_t*) ((uintptr_t) i14 + input_offset);
    }
    const uint8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const uint8_t*) ((uintptr_t) i15 + input_offset);
    }
    const uint8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const uint8_t*) ((uintptr_t) i16 + input_offset);
    }
    const uint8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const uint8_t*) ((uintptr_t) i17 + input_offset);
    }
    const uint8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const uint8_t*) ((uintptr_t) i18 + input_offset);
    }
    const uint8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const uint8_t*) ((uintptr_t) i19 + input_offset);
    }
    const uint8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const uint8_t*) ((uintptr_t) i20 + input_offset);
    }
    const uint8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const uint8_t*) ((uintptr_t) i21 + input_offset);
    }
    const uint8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const uint8_t*) ((uintptr_t) i22 + input_offset);
    }
    const uint8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const uint8_t*) ((uintptr_t) i23 + input_offset);
    }
    const uint8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const uint8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
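    // Main loop: process 16 channels per iteration, accumulating all 25 kernel taps
    // into four vectors of four 32-bit accumulators.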
    for (; c >= 16; c -= 16) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
      __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));


      const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi0x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 4 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi0x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 8)));
      const __m128i vk0x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi0xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 12)));
      const __m128i vk0xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 12 * sizeof(uint8_t))))), vk_zero_point);
      i0 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi0x89AB, vk0x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi0xCDEF, vk0xCDEF));

      const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi1x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 20 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi1x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 8)));
      const __m128i vk1x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi1xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 12)));
      const __m128i vk1xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 28 * sizeof(uint8_t))))), vk_zero_point);
      i1 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi1x89AB, vk1x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi1xCDEF, vk1xCDEF));

      const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi2x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 36 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi2x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 8)));
      const __m128i vk2x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi2xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 12)));
      const __m128i vk2xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 44 * sizeof(uint8_t))))), vk_zero_point);
      i2 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi2x89AB, vk2x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi2xCDEF, vk2xCDEF));

      const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi3x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 52 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi3x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 8)));
      const __m128i vk3x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi3xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 12)));
      const __m128i vk3xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 60 * sizeof(uint8_t))))), vk_zero_point);
      i3 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi3x89AB, vk3x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi3xCDEF, vk3xCDEF));

      const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi4x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 68 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi4x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 8)));
      const __m128i vk4x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi4xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 12)));
      const __m128i vk4xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 76 * sizeof(uint8_t))))), vk_zero_point);
      i4 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi4x89AB, vk4x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi4xCDEF, vk4xCDEF));

      const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi5x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 84 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi5x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 8)));
      const __m128i vk5x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi5xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 12)));
      const __m128i vk5xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 92 * sizeof(uint8_t))))), vk_zero_point);
      i5 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi5x89AB, vk5x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi5xCDEF, vk5xCDEF));

      const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi6x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 100 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi6x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 8)));
      const __m128i vk6x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi6xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 12)));
      const __m128i vk6xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 108 * sizeof(uint8_t))))), vk_zero_point);
      i6 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi6x89AB, vk6x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi6xCDEF, vk6xCDEF));

      const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi7x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 116 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi7x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 8)));
      const __m128i vk7x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi7xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 12)));
      const __m128i vk7xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 124 * sizeof(uint8_t))))), vk_zero_point);
      i7 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi7x89AB, vk7x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi7xCDEF, vk7xCDEF));

      const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi8x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 132 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi8x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 8)));
      const __m128i vk8x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi8xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 12)));
      const __m128i vk8xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 140 * sizeof(uint8_t))))), vk_zero_point);
      i8 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi8x89AB, vk8x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi8xCDEF, vk8xCDEF));

      const __m128i vi9x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
      const __m128i vk9x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi9x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 4)));
      const __m128i vk9x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 148 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi9x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 8)));
      const __m128i vk9x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 152 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi9xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 12)));
      const __m128i vk9xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 156 * sizeof(uint8_t))))), vk_zero_point);
      i9 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi9x4567, vk9x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi9x89AB, vk9x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi9xCDEF, vk9xCDEF));

      const __m128i vi10x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
      const __m128i vk10x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 160 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi10x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 4)));
      const __m128i vk10x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 164 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi10x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 8)));
      const __m128i vk10x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 168 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi10xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 12)));
      const __m128i vk10xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 172 * sizeof(uint8_t))))), vk_zero_point);
      i10 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi10x4567, vk10x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi10x89AB, vk10x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi10xCDEF, vk10xCDEF));

      const __m128i vi11x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
      const __m128i vk11x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 176 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi11x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 4)));
      const __m128i vk11x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 180 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi11x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 8)));
      const __m128i vk11x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 184 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi11xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 12)));
      const __m128i vk11xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 188 * sizeof(uint8_t))))), vk_zero_point);
      i11 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi11x4567, vk11x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi11x89AB, vk11x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi11xCDEF, vk11xCDEF));

      const __m128i vi12x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
      const __m128i vk12x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 192 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi12x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 4)));
      const __m128i vk12x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 196 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi12x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 8)));
      const __m128i vk12x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 200 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi12xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 12)));
      const __m128i vk12xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 204 * sizeof(uint8_t))))), vk_zero_point);
      i12 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi12x4567, vk12x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi12x89AB, vk12x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi12xCDEF, vk12xCDEF));

      const __m128i vi13x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
      const __m128i vk13x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 208 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi13x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 4)));
      const __m128i vk13x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 212 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi13x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 8)));
      const __m128i vk13x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 216 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi13xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 12)));
      const __m128i vk13xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 220 * sizeof(uint8_t))))), vk_zero_point);
      i13 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi13x4567, vk13x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi13x89AB, vk13x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi13xCDEF, vk13xCDEF));

      const __m128i vi14x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
      const __m128i vk14x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 224 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi14x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 4)));
      const __m128i vk14x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 228 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi14x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 8)));
      const __m128i vk14x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 232 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi14xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 12)));
      const __m128i vk14xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 236 * sizeof(uint8_t))))), vk_zero_point);
      i14 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi14x4567, vk14x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi14x89AB, vk14x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi14xCDEF, vk14xCDEF));

      const __m128i vi15x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
      const __m128i vk15x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 240 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi15x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 4)));
      const __m128i vk15x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 244 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi15x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 8)));
      const __m128i vk15x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 248 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi15xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 12)));
      const __m128i vk15xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 252 * sizeof(uint8_t))))), vk_zero_point);
      i15 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi15x4567, vk15x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi15x89AB, vk15x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi15xCDEF, vk15xCDEF));

      const __m128i vi16x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
      const __m128i vk16x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 256 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi16x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 4)));
      const __m128i vk16x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 260 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi16x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 8)));
      const __m128i vk16x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 264 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi16xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 12)));
      const __m128i vk16xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 268 * sizeof(uint8_t))))), vk_zero_point);
      i16 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi16x4567, vk16x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi16x89AB, vk16x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi16xCDEF, vk16xCDEF));

      const __m128i vi17x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
      const __m128i vk17x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 272 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi17x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 4)));
      const __m128i vk17x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 276 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi17x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 8)));
      const __m128i vk17x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 280 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi17xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 12)));
      const __m128i vk17xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 284 * sizeof(uint8_t))))), vk_zero_point);
      i17 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi17x4567, vk17x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi17x89AB, vk17x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi17xCDEF, vk17xCDEF));

      const __m128i vi18x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
      const __m128i vk18x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 288 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi18x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 4)));
      const __m128i vk18x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 292 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi18x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 8)));
      const __m128i vk18x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 296 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi18xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 12)));
      const __m128i vk18xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 300 * sizeof(uint8_t))))), vk_zero_point);
      i18 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi18x4567, vk18x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi18x89AB, vk18x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi18xCDEF, vk18xCDEF));

      const __m128i vi19x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
      const __m128i vk19x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 304 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi19x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 4)));
      const __m128i vk19x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 308 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi19x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 8)));
      const __m128i vk19x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 312 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi19xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 12)));
      const __m128i vk19xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 316 * sizeof(uint8_t))))), vk_zero_point);
      i19 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi19x4567, vk19x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi19x89AB, vk19x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi19xCDEF, vk19xCDEF));

      const __m128i vi20x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
      const __m128i vk20x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 320 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi20x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 4)));
      const __m128i vk20x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 324 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi20x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 8)));
      const __m128i vk20x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 328 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi20xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 12)));
      const __m128i vk20xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 332 * sizeof(uint8_t))))), vk_zero_point);
      i20 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi20x4567, vk20x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi20x89AB, vk20x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi20xCDEF, vk20xCDEF));

      const __m128i vi21x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
      const __m128i vk21x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 336 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi21x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 4)));
      const __m128i vk21x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 340 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi21x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 8)));
      const __m128i vk21x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 344 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi21xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 12)));
      const __m128i vk21xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 348 * sizeof(uint8_t))))), vk_zero_point);
      i21 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi21x4567, vk21x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi21x89AB, vk21x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi21xCDEF, vk21xCDEF));

      const __m128i vi22x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
      const __m128i vk22x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 352 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi22x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 4)));
      const __m128i vk22x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 356 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi22x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 8)));
      const __m128i vk22x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 360 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi22xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 12)));
      const __m128i vk22xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 364 * sizeof(uint8_t))))), vk_zero_point);
      i22 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi22x4567, vk22x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi22x89AB, vk22x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi22xCDEF, vk22xCDEF));

      const __m128i vi23x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
      const __m128i vk23x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 368 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi23x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 4)));
      const __m128i vk23x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 372 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi23x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 8)));
      const __m128i vk23x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 376 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi23xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 12)));
      const __m128i vk23xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 380 * sizeof(uint8_t))))), vk_zero_point);
      i23 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi23x4567, vk23x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi23x89AB, vk23x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi23xCDEF, vk23xCDEF));

      const __m128i vi24x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
      const __m128i vk24x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 384 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi24x4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 4)));
      const __m128i vk24x4567 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 388 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi24x89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 8)));
      const __m128i vk24x89AB = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 392 * sizeof(uint8_t))))), vk_zero_point);
      const __m128i vi24xCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 12)));
      const __m128i vk24xCDEF = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 396 * sizeof(uint8_t))))), vk_zero_point);
      i24 += 16;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi24x4567, vk24x4567));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vi24x89AB, vk24x89AB));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vi24xCDEF, vk24xCDEF));

      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 400 * sizeof(uint8_t));

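      // Requantize: convert accumulators to float, scale, clamp to the output maximum,
      // and round back to int32.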
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
      __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);

      const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
      vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
      vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      vacc89AB = _mm_cvtps_epi32(vscaled89AB);
      vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
      __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
      vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);

      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
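    // Remainder: process the last 1-15 channels in groups of up to 4.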
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 16);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) k))), vk_zero_point);
        i0 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
        const __m128i vi1x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16)))), vk_zero_point);
        i1 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
        const __m128i vi2x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32)))), vk_zero_point);
        i2 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
        const __m128i vi3x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48)))), vk_zero_point);
        i3 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
        const __m128i vi4x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64)))), vk_zero_point);
        i4 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
        const __m128i vi5x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 80)))), vk_zero_point);
        i5 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
        const __m128i vi6x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 96)))), vk_zero_point);
        i6 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
        const __m128i vi7x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 112)))), vk_zero_point);
        i7 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
        const __m128i vi8x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 128)))), vk_zero_point);
        i8 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
        const __m128i vi9x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
        const __m128i vk9x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 144)))), vk_zero_point);
        i9 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi9x0123, vk9x0123));
        const __m128i vi10x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
        const __m128i vk10x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 160)))), vk_zero_point);
        i10 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi10x0123, vk10x0123));
        const __m128i vi11x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
        const __m128i vk11x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 176)))), vk_zero_point);
        i11 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi11x0123, vk11x0123));
        const __m128i vi12x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
        const __m128i vk12x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 192)))), vk_zero_point);
        i12 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi12x0123, vk12x0123));
        const __m128i vi13x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
        const __m128i vk13x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 208)))), vk_zero_point);
        i13 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi13x0123, vk13x0123));
        const __m128i vi14x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
        const __m128i vk14x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 224)))), vk_zero_point);
        i14 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi14x0123, vk14x0123));
        const __m128i vi15x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
        const __m128i vk15x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 240)))), vk_zero_point);
        i15 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi15x0123, vk15x0123));
        const __m128i vi16x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
        const __m128i vk16x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 256)))), vk_zero_point);
        i16 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi16x0123, vk16x0123));
        const __m128i vi17x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
        const __m128i vk17x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 272)))), vk_zero_point);
        i17 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi17x0123, vk17x0123));
        const __m128i vi18x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
        const __m128i vk18x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 288)))), vk_zero_point);
        i18 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi18x0123, vk18x0123));
        const __m128i vi19x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
        const __m128i vk19x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 304)))), vk_zero_point);
        i19 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi19x0123, vk19x0123));
        const __m128i vi20x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
        const __m128i vk20x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 320)))), vk_zero_point);
        i20 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi20x0123, vk20x0123));
        const __m128i vi21x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
        const __m128i vk21x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 336)))), vk_zero_point);
        i21 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi21x0123, vk21x0123));
        const __m128i vi22x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
        const __m128i vk22x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 352)))), vk_zero_point);
        i22 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi22x0123, vk22x0123));
        const __m128i vi23x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
        const __m128i vk23x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 368)))), vk_zero_point);
        i23 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi23x0123, vk23x0123));
        const __m128i vi24x0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
        const __m128i vk24x0123 = _mm_sub_epi32(_mm_cvtepu8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 384)))), vk_zero_point);
        i24 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi24x0123, vk24x0123));

        k += 4;

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        vscaled0123 = _mm_mul_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.scale));
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse2.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packus_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epu8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));

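        // Store 4 output bytes in the common case; otherwise write the trailing 2 and/or 1 bytes.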
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (uint8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}