// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#if defined(__GNUC__) || defined(__clang__)
  #include <x86intrin.h>
#else
  #include <immintrin.h>
  #include <ammintrin.h>
#endif

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>

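// Depthwise convolution microkernel: 9 taps per output pixel, unipass,
// signed 8-bit (QS8) inputs/outputs with fp32 requantization, using the
// XOP _mm_macc_epi32 multiply-accumulate. The main loop handles 16
// channels per iteration ("up16x9").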
void xnn_qs8_dwconv_minmax_fp32_ukernel_up16x9__xop_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
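    // Load the 9 input row pointers for this output pixel. Rows that point
    // at the shared zero buffer are left as-is; all other rows are adjusted
    // by input_offset.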
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
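    // Packed weights layout per 16-channel block: 16 int32 biases followed
    // by 9*16 = 144 int8 filter taps.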
    for (; c >= 16; c -= 16) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));
      __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 8));
      __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 12));

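      // Tap 0: sign-extend 4 int8 inputs and 4 int8 weights at a time to
      // int32, then multiply-accumulate into the int32 accumulators. The
      // same pattern repeats for taps 1-8 below.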
      const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)))));
      const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 4 * sizeof(int8_t)))));
      const __m128i vi0x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 8)));
      const __m128i vk0x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)))));
      const __m128i vi0xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 12)));
      const __m128i vk0xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 12 * sizeof(int8_t)))));
      i0 += 16;

      vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi0x4567, vk0x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi0x89AB, vk0x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi0xCDEF, vk0xCDEF, vaccCDEF);

      const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)))));
      const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 20 * sizeof(int8_t)))));
      const __m128i vi1x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 8)));
      const __m128i vk1x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)))));
      const __m128i vi1xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 12)));
      const __m128i vk1xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 28 * sizeof(int8_t)))));
      i1 += 16;

      vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi1x4567, vk1x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi1x89AB, vk1x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi1xCDEF, vk1xCDEF, vaccCDEF);

      const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)))));
      const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 36 * sizeof(int8_t)))));
      const __m128i vi2x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 8)));
      const __m128i vk2x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)))));
      const __m128i vi2xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 12)));
      const __m128i vk2xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 44 * sizeof(int8_t)))));
      i2 += 16;

      vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi2x4567, vk2x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi2x89AB, vk2x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi2xCDEF, vk2xCDEF, vaccCDEF);

      const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)))));
      const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 52 * sizeof(int8_t)))));
      const __m128i vi3x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 8)));
      const __m128i vk3x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)))));
      const __m128i vi3xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 12)));
      const __m128i vk3xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 60 * sizeof(int8_t)))));
      i3 += 16;

      vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi3x4567, vk3x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi3x89AB, vk3x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi3xCDEF, vk3xCDEF, vaccCDEF);

      const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)))));
      const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 68 * sizeof(int8_t)))));
      const __m128i vi4x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 8)));
      const __m128i vk4x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)))));
      const __m128i vi4xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 12)));
      const __m128i vk4xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 76 * sizeof(int8_t)))));
      i4 += 16;

      vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi4x4567, vk4x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi4x89AB, vk4x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi4xCDEF, vk4xCDEF, vaccCDEF);

      const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)))));
      const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 84 * sizeof(int8_t)))));
      const __m128i vi5x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 8)));
      const __m128i vk5x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)))));
      const __m128i vi5xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 12)));
      const __m128i vk5xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 92 * sizeof(int8_t)))));
      i5 += 16;

      vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi5x4567, vk5x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi5x89AB, vk5x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi5xCDEF, vk5xCDEF, vaccCDEF);

      const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)))));
      const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 100 * sizeof(int8_t)))));
      const __m128i vi6x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 8)));
      const __m128i vk6x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)))));
      const __m128i vi6xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 12)));
      const __m128i vk6xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 108 * sizeof(int8_t)))));
      i6 += 16;

      vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi6x4567, vk6x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi6x89AB, vk6x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi6xCDEF, vk6xCDEF, vaccCDEF);

      const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)))));
      const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 116 * sizeof(int8_t)))));
      const __m128i vi7x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 8)));
      const __m128i vk7x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)))));
      const __m128i vi7xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 12)));
      const __m128i vk7xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 124 * sizeof(int8_t)))));
      i7 += 16;

      vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi7x4567, vk7x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi7x89AB, vk7x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi7xCDEF, vk7xCDEF, vaccCDEF);

      const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)))));
      const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 132 * sizeof(int8_t)))));
      const __m128i vi8x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 8)));
      const __m128i vk8x89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)))));
      const __m128i vi8xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 12)));
      const __m128i vk8xCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 16 * sizeof(int32_t) + 140 * sizeof(int8_t)))));
      i8 += 16;

      vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi8x4567, vk8x4567, vacc4567);
      vacc89AB = _mm_macc_epi32(vi8x89AB, vk8x89AB, vacc89AB);
      vaccCDEF = _mm_macc_epi32(vi8xCDEF, vk8xCDEF, vaccCDEF);

      w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));

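      // Requantize: convert the int32 accumulators to fp32, scale, clamp
      // against the output max, and round back to int32.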
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);
      __m128 vscaled89AB = _mm_cvtepi32_ps(vacc89AB);
      __m128 vscaledCDEF = _mm_cvtepi32_ps(vaccCDEF);

      const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale);
      vscaled89AB = _mm_mul_ps(vscaled89AB, vscale);
      vscaledCDEF = _mm_mul_ps(vscaledCDEF, vscale);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);
      vscaled89AB = _mm_min_ps(vscaled89AB, voutput_max_less_zero_point);
      vscaledCDEF = _mm_min_ps(vscaledCDEF, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);
      vacc89AB = _mm_cvtps_epi32(vscaled89AB);
      vaccCDEF = _mm_cvtps_epi32(vscaledCDEF);

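      // Pack int32 -> int16 -> int8 with saturation, adding the output zero
      // point along the way, then clamp against the output min.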
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
      vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);

      _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
      output += 16;
    }
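    // Remainder: handle the final 1-15 channels in groups of up to 4.
    // k points past the 16 int32 biases of the current weight block; the
    // loads may read past the valid channels, which the XNN_OOB_READS
    // annotation on the kernel permits.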
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 16);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) k)));
        i0 += 4;

        vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
        const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16))));
        i1 += 4;

        vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
        const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32))));
        i2 += 4;

        vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
        const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48))));
        i3 += 4;

        vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
        const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64))));
        i4 += 4;

        vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
        const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 80))));
        i5 += 4;

        vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
        const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 96))));
        i6 += 4;

        vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
        const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 112))));
        i7 += 4;

        vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
        const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 128))));
        i8 += 4;

        vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);

        k += 4;

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        vscaled0123 = _mm_mul_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.scale));
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packs_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

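        // Store 4 output values, or 2 and/or 1 for the final partial group.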
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }
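    // Advance the output pointer to the start of the next output pixel.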
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}