// Auto-generated file. Do not edit!
// Template: src/qs8-dwconv/unipass-neon-mul16.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>


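// Depthwise convolution microkernel for signed 8-bit (QS8) inputs and outputs:
// 25 taps per output pixel (typically a 5x5 kernel), unipass, up to 8 channels
// per inner-loop iteration, 16-bit multiplies (MUL16) with ARMv8 NEON fp32
// requantization.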
void xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neonv8_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
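  // One iteration per output pixel: fetch the 25 input row pointers for this
  // pixel, advance every pointer that does not reference the zero (padding)
  // buffer by input_offset, then accumulate across all channels.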
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
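    // Main loop: 8 channels per iteration. For each 8-channel group the packed
    // weights hold 8 int32 bias values followed by 25 sets of 8 int8 kernel
    // taps; w advances through them in that order.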
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);


      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

      const int16x8_t vi9x01234567 = vmovl_s8(vld1_s8(i9)); i9 += 8;
      const int16x8_t vk9x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi9x01234567), vget_low_s16(vk9x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi9x01234567), vget_high_s16(vk9x01234567));

      const int16x8_t vi10x01234567 = vmovl_s8(vld1_s8(i10)); i10 += 8;
      const int16x8_t vk10x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi10x01234567), vget_low_s16(vk10x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi10x01234567), vget_high_s16(vk10x01234567));

      const int16x8_t vi11x01234567 = vmovl_s8(vld1_s8(i11)); i11 += 8;
      const int16x8_t vk11x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi11x01234567), vget_low_s16(vk11x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi11x01234567), vget_high_s16(vk11x01234567));

      const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;
      const int16x8_t vk12x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi12x01234567), vget_low_s16(vk12x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi12x01234567), vget_high_s16(vk12x01234567));

      const int16x8_t vi13x01234567 = vmovl_s8(vld1_s8(i13)); i13 += 8;
      const int16x8_t vk13x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi13x01234567), vget_low_s16(vk13x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi13x01234567), vget_high_s16(vk13x01234567));

      const int16x8_t vi14x01234567 = vmovl_s8(vld1_s8(i14)); i14 += 8;
      const int16x8_t vk14x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi14x01234567), vget_low_s16(vk14x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi14x01234567), vget_high_s16(vk14x01234567));

      const int16x8_t vi15x01234567 = vmovl_s8(vld1_s8(i15)); i15 += 8;
      const int16x8_t vk15x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi15x01234567), vget_low_s16(vk15x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi15x01234567), vget_high_s16(vk15x01234567));

      const int16x8_t vi16x01234567 = vmovl_s8(vld1_s8(i16)); i16 += 8;
      const int16x8_t vk16x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi16x01234567), vget_low_s16(vk16x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi16x01234567), vget_high_s16(vk16x01234567));

      const int16x8_t vi17x01234567 = vmovl_s8(vld1_s8(i17)); i17 += 8;
      const int16x8_t vk17x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi17x01234567), vget_low_s16(vk17x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi17x01234567), vget_high_s16(vk17x01234567));

      const int16x8_t vi18x01234567 = vmovl_s8(vld1_s8(i18)); i18 += 8;
      const int16x8_t vk18x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi18x01234567), vget_low_s16(vk18x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi18x01234567), vget_high_s16(vk18x01234567));

      const int16x8_t vi19x01234567 = vmovl_s8(vld1_s8(i19)); i19 += 8;
      const int16x8_t vk19x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi19x01234567), vget_low_s16(vk19x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi19x01234567), vget_high_s16(vk19x01234567));

      const int16x8_t vi20x01234567 = vmovl_s8(vld1_s8(i20)); i20 += 8;
      const int16x8_t vk20x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi20x01234567), vget_low_s16(vk20x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi20x01234567), vget_high_s16(vk20x01234567));

      const int16x8_t vi21x01234567 = vmovl_s8(vld1_s8(i21)); i21 += 8;
      const int16x8_t vk21x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi21x01234567), vget_low_s16(vk21x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi21x01234567), vget_high_s16(vk21x01234567));

      const int16x8_t vi22x01234567 = vmovl_s8(vld1_s8(i22)); i22 += 8;
      const int16x8_t vk22x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi22x01234567), vget_low_s16(vk22x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi22x01234567), vget_high_s16(vk22x01234567));

      const int16x8_t vi23x01234567 = vmovl_s8(vld1_s8(i23)); i23 += 8;
      const int16x8_t vk23x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi23x01234567), vget_low_s16(vk23x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi23x01234567), vget_high_s16(vk23x01234567));

      const int16x8_t vi24x01234567 = vmovl_s8(vld1_s8(i24)); i24 += 8;
      const int16x8_t vk24x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi24x01234567), vget_low_s16(vk24x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi24x01234567), vget_high_s16(vk24x01234567));

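      // Requantize: convert the int32 accumulators to float, apply the scale,
      // and round back to int32 with round-to-nearest-even (vcvtnq, ARMv8).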
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);

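      // Narrow to int16 with saturation, add the output zero point, then
      // narrow to int8 with saturation.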
      #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      #else // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));

      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      #endif // !XNN_ARCH_ARM64

      vout01234567 = vmax_s8(vout01234567, voutput_min);

      vout01234567 = vmin_s8(vout01234567, voutput_max);

      vst1_s8(output, vout01234567); output += 8;
    }
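    // Remainder: 1-7 trailing channels. Full 8-lane vectors are still loaded
    // from the inputs and weights (the function is declared XNN_OOB_READS to
    // permit this); only c bytes are stored below.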
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
        const int16x8_t vi9x01234567 = vmovl_s8(vld1_s8(i9));
        const int16x8_t vk9x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 72)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi9x01234567), vget_low_s16(vk9x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi9x01234567), vget_high_s16(vk9x01234567));
        const int16x8_t vi10x01234567 = vmovl_s8(vld1_s8(i10));
        const int16x8_t vk10x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 80)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi10x01234567), vget_low_s16(vk10x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi10x01234567), vget_high_s16(vk10x01234567));
        const int16x8_t vi11x01234567 = vmovl_s8(vld1_s8(i11));
        const int16x8_t vk11x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 88)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi11x01234567), vget_low_s16(vk11x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi11x01234567), vget_high_s16(vk11x01234567));
        const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));
        const int16x8_t vk12x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 96)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi12x01234567), vget_low_s16(vk12x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi12x01234567), vget_high_s16(vk12x01234567));
        const int16x8_t vi13x01234567 = vmovl_s8(vld1_s8(i13));
        const int16x8_t vk13x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 104)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi13x01234567), vget_low_s16(vk13x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi13x01234567), vget_high_s16(vk13x01234567));
        const int16x8_t vi14x01234567 = vmovl_s8(vld1_s8(i14));
        const int16x8_t vk14x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 112)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi14x01234567), vget_low_s16(vk14x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi14x01234567), vget_high_s16(vk14x01234567));
        const int16x8_t vi15x01234567 = vmovl_s8(vld1_s8(i15));
        const int16x8_t vk15x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 120)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi15x01234567), vget_low_s16(vk15x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi15x01234567), vget_high_s16(vk15x01234567));
        const int16x8_t vi16x01234567 = vmovl_s8(vld1_s8(i16));
        const int16x8_t vk16x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 128)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi16x01234567), vget_low_s16(vk16x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi16x01234567), vget_high_s16(vk16x01234567));
        const int16x8_t vi17x01234567 = vmovl_s8(vld1_s8(i17));
        const int16x8_t vk17x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 136)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi17x01234567), vget_low_s16(vk17x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi17x01234567), vget_high_s16(vk17x01234567));
        const int16x8_t vi18x01234567 = vmovl_s8(vld1_s8(i18));
        const int16x8_t vk18x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 144)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi18x01234567), vget_low_s16(vk18x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi18x01234567), vget_high_s16(vk18x01234567));
        const int16x8_t vi19x01234567 = vmovl_s8(vld1_s8(i19));
        const int16x8_t vk19x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 152)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi19x01234567), vget_low_s16(vk19x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi19x01234567), vget_high_s16(vk19x01234567));
        const int16x8_t vi20x01234567 = vmovl_s8(vld1_s8(i20));
        const int16x8_t vk20x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 160)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi20x01234567), vget_low_s16(vk20x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi20x01234567), vget_high_s16(vk20x01234567));
        const int16x8_t vi21x01234567 = vmovl_s8(vld1_s8(i21));
        const int16x8_t vk21x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 168)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi21x01234567), vget_low_s16(vk21x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi21x01234567), vget_high_s16(vk21x01234567));
        const int16x8_t vi22x01234567 = vmovl_s8(vld1_s8(i22));
        const int16x8_t vk22x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 176)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi22x01234567), vget_low_s16(vk22x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi22x01234567), vget_high_s16(vk22x01234567));
        const int16x8_t vi23x01234567 = vmovl_s8(vld1_s8(i23));
        const int16x8_t vk23x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 184)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi23x01234567), vget_low_s16(vk23x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi23x01234567), vget_high_s16(vk23x01234567));
        const int16x8_t vi24x01234567 = vmovl_s8(vld1_s8(i24));
        const int16x8_t vk24x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 192)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi24x01234567), vget_low_s16(vk24x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi24x01234567), vget_high_s16(vk24x01234567));

        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vcvtnq_s32_f32(vfpacc0123);
        vacc4567 = vcvtnq_s32_f32(vfpacc4567);

        #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
        #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
        #endif
        vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

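        // Store the low c bytes of the result: 4, then 2, then 1, rotating the
        // vector down after each partial store.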
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
