// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#if defined(__GNUC__) || defined(__clang__)
  #include <x86intrin.h>
#else
  #include <immintrin.h>
  #include <ammintrin.h>
#endif

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>

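// QC8 depthwise convolution microkernel, unipass, fp32 requantization, XOP
// "mul32" variant: 25 kernel taps (e.g. a 5x5 window), up to 8 channels per
// main-loop iteration. Weights are packed per 8-channel group as 8 int32
// biases, 25 groups of 8 int8 taps, then 8 fp32 per-channel scales;
// accumulation uses XOP's 32-bit multiply-accumulate (_mm_macc_epi32).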
void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x25__xop_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
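    // Gather the 25 input row pointers for this output pixel from the
    // indirection buffer; entries that point at the zero buffer are padding
    // and must not be displaced by input_offset.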
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

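    // Main loop: 8 channels per iteration. w walks the packed weights for
    // each 8-channel group: 8 int32 biases, then 25 groups of 8 int8 kernel
    // taps, then 8 fp32 per-channel scales.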
    size_t c = channels;
    const void* w = weights;
    for (; c >= 8; c -= 8) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

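      // For each of the 25 taps: sign-extend 4+4 input bytes and 4+4 weight
      // bytes to int32 lanes, then fuse multiply-add into the accumulators
      // with XOP's _mm_macc_epi32.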
      const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)))));
      const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(int8_t)))));
      i0 += 8;

      vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi0x4567, vk0x4567, vacc4567);

      const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)))));
      const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(int8_t)))));
      i1 += 8;

      vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi1x4567, vk1x4567, vacc4567);

      const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)))));
      const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(int8_t)))));
      i2 += 8;

      vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi2x4567, vk2x4567, vacc4567);

      const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)))));
      const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(int8_t)))));
      i3 += 8;

      vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi3x4567, vk3x4567, vacc4567);

      const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)))));
      const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(int8_t)))));
      i4 += 8;

      vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi4x4567, vk4x4567, vacc4567);

      const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)))));
      const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(int8_t)))));
      i5 += 8;

      vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi5x4567, vk5x4567, vacc4567);

      const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)))));
      const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(int8_t)))));
      i6 += 8;

      vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi6x4567, vk6x4567, vacc4567);

      const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)))));
      const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(int8_t)))));
      i7 += 8;

      vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi7x4567, vk7x4567, vacc4567);

      const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)))));
      const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(int8_t)))));
      i8 += 8;

      vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi8x4567, vk8x4567, vacc4567);

      const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
      const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)))));
      const __m128i vi9x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9 + 4)));
      const __m128i vk9x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 76 * sizeof(int8_t)))));
      i9 += 8;

      vacc0123 = _mm_macc_epi32(vi9x0123, vk9x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi9x4567, vk9x4567, vacc4567);

      const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
      const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t)))));
      const __m128i vi10x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10 + 4)));
      const __m128i vk10x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 84 * sizeof(int8_t)))));
      i10 += 8;

      vacc0123 = _mm_macc_epi32(vi10x0123, vk10x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi10x4567, vk10x4567, vacc4567);

      const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
      const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t)))));
      const __m128i vi11x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11 + 4)));
      const __m128i vk11x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 92 * sizeof(int8_t)))));
      i11 += 8;

      vacc0123 = _mm_macc_epi32(vi11x0123, vk11x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi11x4567, vk11x4567, vacc4567);

      const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
      const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t)))));
      const __m128i vi12x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12 + 4)));
      const __m128i vk12x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 100 * sizeof(int8_t)))));
      i12 += 8;

      vacc0123 = _mm_macc_epi32(vi12x0123, vk12x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi12x4567, vk12x4567, vacc4567);

      const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
      const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t)))));
      const __m128i vi13x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13 + 4)));
      const __m128i vk13x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 108 * sizeof(int8_t)))));
      i13 += 8;

      vacc0123 = _mm_macc_epi32(vi13x0123, vk13x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi13x4567, vk13x4567, vacc4567);

      const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
      const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t)))));
      const __m128i vi14x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14 + 4)));
      const __m128i vk14x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 116 * sizeof(int8_t)))));
      i14 += 8;

      vacc0123 = _mm_macc_epi32(vi14x0123, vk14x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi14x4567, vk14x4567, vacc4567);

      const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
      const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t)))));
      const __m128i vi15x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15 + 4)));
      const __m128i vk15x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 124 * sizeof(int8_t)))));
      i15 += 8;

      vacc0123 = _mm_macc_epi32(vi15x0123, vk15x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi15x4567, vk15x4567, vacc4567);

      const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
      const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t)))));
      const __m128i vi16x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16 + 4)));
      const __m128i vk16x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 132 * sizeof(int8_t)))));
      i16 += 8;

      vacc0123 = _mm_macc_epi32(vi16x0123, vk16x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi16x4567, vk16x4567, vacc4567);

      const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
      const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t)))));
      const __m128i vi17x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17 + 4)));
      const __m128i vk17x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 140 * sizeof(int8_t)))));
      i17 += 8;

      vacc0123 = _mm_macc_epi32(vi17x0123, vk17x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi17x4567, vk17x4567, vacc4567);

      const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
      const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t)))));
      const __m128i vi18x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18 + 4)));
      const __m128i vk18x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 148 * sizeof(int8_t)))));
      i18 += 8;

      vacc0123 = _mm_macc_epi32(vi18x0123, vk18x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi18x4567, vk18x4567, vacc4567);

      const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
      const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t)))));
      const __m128i vi19x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19 + 4)));
      const __m128i vk19x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 156 * sizeof(int8_t)))));
      i19 += 8;

      vacc0123 = _mm_macc_epi32(vi19x0123, vk19x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi19x4567, vk19x4567, vacc4567);

      const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
      const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t)))));
      const __m128i vi20x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20 + 4)));
      const __m128i vk20x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 164 * sizeof(int8_t)))));
      i20 += 8;

      vacc0123 = _mm_macc_epi32(vi20x0123, vk20x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi20x4567, vk20x4567, vacc4567);

      const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
      const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t)))));
      const __m128i vi21x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21 + 4)));
      const __m128i vk21x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 172 * sizeof(int8_t)))));
      i21 += 8;

      vacc0123 = _mm_macc_epi32(vi21x0123, vk21x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi21x4567, vk21x4567, vacc4567);

      const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
      const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t)))));
      const __m128i vi22x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22 + 4)));
      const __m128i vk22x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 180 * sizeof(int8_t)))));
      i22 += 8;

      vacc0123 = _mm_macc_epi32(vi22x0123, vk22x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi22x4567, vk22x4567, vacc4567);

      const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
      const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t)))));
      const __m128i vi23x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23 + 4)));
      const __m128i vk23x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 188 * sizeof(int8_t)))));
      i23 += 8;

      vacc0123 = _mm_macc_epi32(vi23x0123, vk23x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi23x4567, vk23x4567, vacc4567);

      const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
      const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t)))));
      const __m128i vi24x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24 + 4)));
      const __m128i vk24x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 196 * sizeof(int8_t)))));
      i24 += 8;

      vacc0123 = _mm_macc_epi32(vi24x0123, vk24x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi24x4567, vk24x4567, vacc4567);

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t));

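      // Requantize: convert to fp32, apply the per-channel scales, clamp
      // against the output max (stored as max minus zero point), and round
      // back to int32.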
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
      w = (const void*) ((const float*) w + 8);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

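      // Pack to int16 with signed saturation, add the output zero point,
      // pack to int8, then apply the output min clamp before storing.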
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
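      // Remainder: handle the last 1-7 channels in groups of up to 4.
      // k points at the int8 taps for this group; taps for the same kernel
      // position are 8 bytes apart because weights are packed for 8-channel
      // groups.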
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 8);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) k)));
        i0 += 4;

        vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
        const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 8))));
        i1 += 4;

        vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
        const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16))));
        i2 += 4;

        vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
        const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24))));
        i3 += 4;

        vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
        const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32))));
        i4 += 4;

        vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
        const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 40))));
        i5 += 4;

        vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
        const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48))));
        i6 += 4;

        vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
        const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 56))));
        i7 += 4;

        vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
        const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64))));
        i8 += 4;

        vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);
        const __m128i vi9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i9)));
        const __m128i vk9x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 72))));
        i9 += 4;

        vacc0123 = _mm_macc_epi32(vi9x0123, vk9x0123, vacc0123);
        const __m128i vi10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i10)));
        const __m128i vk10x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 80))));
        i10 += 4;

        vacc0123 = _mm_macc_epi32(vi10x0123, vk10x0123, vacc0123);
        const __m128i vi11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i11)));
        const __m128i vk11x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 88))));
        i11 += 4;

        vacc0123 = _mm_macc_epi32(vi11x0123, vk11x0123, vacc0123);
        const __m128i vi12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i12)));
        const __m128i vk12x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 96))));
        i12 += 4;

        vacc0123 = _mm_macc_epi32(vi12x0123, vk12x0123, vacc0123);
        const __m128i vi13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i13)));
        const __m128i vk13x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 104))));
        i13 += 4;

        vacc0123 = _mm_macc_epi32(vi13x0123, vk13x0123, vacc0123);
        const __m128i vi14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i14)));
        const __m128i vk14x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 112))));
        i14 += 4;

        vacc0123 = _mm_macc_epi32(vi14x0123, vk14x0123, vacc0123);
        const __m128i vi15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i15)));
        const __m128i vk15x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 120))));
        i15 += 4;

        vacc0123 = _mm_macc_epi32(vi15x0123, vk15x0123, vacc0123);
        const __m128i vi16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i16)));
        const __m128i vk16x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 128))));
        i16 += 4;

        vacc0123 = _mm_macc_epi32(vi16x0123, vk16x0123, vacc0123);
        const __m128i vi17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i17)));
        const __m128i vk17x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 136))));
        i17 += 4;

        vacc0123 = _mm_macc_epi32(vi17x0123, vk17x0123, vacc0123);
        const __m128i vi18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i18)));
        const __m128i vk18x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 144))));
        i18 += 4;

        vacc0123 = _mm_macc_epi32(vi18x0123, vk18x0123, vacc0123);
        const __m128i vi19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i19)));
        const __m128i vk19x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 152))));
        i19 += 4;

        vacc0123 = _mm_macc_epi32(vi19x0123, vk19x0123, vacc0123);
        const __m128i vi20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i20)));
        const __m128i vk20x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 160))));
        i20 += 4;

        vacc0123 = _mm_macc_epi32(vi20x0123, vk20x0123, vacc0123);
        const __m128i vi21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i21)));
        const __m128i vk21x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 168))));
        i21 += 4;

        vacc0123 = _mm_macc_epi32(vi21x0123, vk21x0123, vacc0123);
        const __m128i vi22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i22)));
        const __m128i vk22x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 176))));
        i22 += 4;

        vacc0123 = _mm_macc_epi32(vi22x0123, vk22x0123, vacc0123);
        const __m128i vi23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i23)));
        const __m128i vk23x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 184))));
        i23 += 4;

        vacc0123 = _mm_macc_epi32(vi23x0123, vk23x0123, vacc0123);
        const __m128i vi24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i24)));
        const __m128i vk24x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 192))));
        i24 += 4;

        vacc0123 = _mm_macc_epi32(vi24x0123, vk24x0123, vacc0123);

        k += 4;

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t)));
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packs_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

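        // Store 4 output bytes at once when at least 4 channels remain;
        // otherwise write the final 1-3 bytes lane by lane.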
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}