// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-neon-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


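// QS8 depthwise-convolution microkernel with fused min/max clamping and
// fp32 requantization. The "up8x25" suffix denotes a unipass kernel that
// processes up to 8 channels per loop iteration across 25 kernel taps
// (e.g. a 5x5 filter); "mul16" refers to the NEON strategy of widening the
// int8 inputs and weights to int16 and accumulating with 16x16->32-bit
// multiply-accumulates (vmlal_s16).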
void xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__neon_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
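  // Each iteration of the outer loop produces one output pixel: gather the
  // 25 per-tap input row pointers, apply input_offset to every pointer
  // except the shared `zero` row (which supplies zero padding and must not
  // be offset), then sweep across the channels.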
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
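    // Main channel loop: 8 channels per iteration. Two int32x4 accumulators
    // (low/high halves) start from the per-channel bias packed at the front
    // of the weights, then accumulate all 25 taps with widening 16-bit
    // multiply-accumulates.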
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);


      const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0)); i0 += 8;
      const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));

      const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1)); i1 += 8;
      const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));

      const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2)); i2 += 8;
      const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));

      const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3)); i3 += 8;
      const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));

      const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4)); i4 += 8;
      const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));

      const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5)); i5 += 8;
      const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));

      const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6)); i6 += 8;
      const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));

      const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7)); i7 += 8;
      const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));

      const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8)); i8 += 8;
      const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

      const int16x8_t vi9x01234567 = vmovl_s8(vld1_s8(i9)); i9 += 8;
      const int16x8_t vk9x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi9x01234567), vget_low_s16(vk9x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi9x01234567), vget_high_s16(vk9x01234567));

      const int16x8_t vi10x01234567 = vmovl_s8(vld1_s8(i10)); i10 += 8;
      const int16x8_t vk10x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi10x01234567), vget_low_s16(vk10x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi10x01234567), vget_high_s16(vk10x01234567));

      const int16x8_t vi11x01234567 = vmovl_s8(vld1_s8(i11)); i11 += 8;
      const int16x8_t vk11x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi11x01234567), vget_low_s16(vk11x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi11x01234567), vget_high_s16(vk11x01234567));

      const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12)); i12 += 8;
      const int16x8_t vk12x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi12x01234567), vget_low_s16(vk12x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi12x01234567), vget_high_s16(vk12x01234567));

      const int16x8_t vi13x01234567 = vmovl_s8(vld1_s8(i13)); i13 += 8;
      const int16x8_t vk13x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi13x01234567), vget_low_s16(vk13x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi13x01234567), vget_high_s16(vk13x01234567));

      const int16x8_t vi14x01234567 = vmovl_s8(vld1_s8(i14)); i14 += 8;
      const int16x8_t vk14x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi14x01234567), vget_low_s16(vk14x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi14x01234567), vget_high_s16(vk14x01234567));

      const int16x8_t vi15x01234567 = vmovl_s8(vld1_s8(i15)); i15 += 8;
      const int16x8_t vk15x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi15x01234567), vget_low_s16(vk15x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi15x01234567), vget_high_s16(vk15x01234567));

      const int16x8_t vi16x01234567 = vmovl_s8(vld1_s8(i16)); i16 += 8;
      const int16x8_t vk16x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi16x01234567), vget_low_s16(vk16x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi16x01234567), vget_high_s16(vk16x01234567));

      const int16x8_t vi17x01234567 = vmovl_s8(vld1_s8(i17)); i17 += 8;
      const int16x8_t vk17x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi17x01234567), vget_low_s16(vk17x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi17x01234567), vget_high_s16(vk17x01234567));

      const int16x8_t vi18x01234567 = vmovl_s8(vld1_s8(i18)); i18 += 8;
      const int16x8_t vk18x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi18x01234567), vget_low_s16(vk18x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi18x01234567), vget_high_s16(vk18x01234567));

      const int16x8_t vi19x01234567 = vmovl_s8(vld1_s8(i19)); i19 += 8;
      const int16x8_t vk19x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi19x01234567), vget_low_s16(vk19x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi19x01234567), vget_high_s16(vk19x01234567));

      const int16x8_t vi20x01234567 = vmovl_s8(vld1_s8(i20)); i20 += 8;
      const int16x8_t vk20x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi20x01234567), vget_low_s16(vk20x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi20x01234567), vget_high_s16(vk20x01234567));

      const int16x8_t vi21x01234567 = vmovl_s8(vld1_s8(i21)); i21 += 8;
      const int16x8_t vk21x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi21x01234567), vget_low_s16(vk21x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi21x01234567), vget_high_s16(vk21x01234567));

      const int16x8_t vi22x01234567 = vmovl_s8(vld1_s8(i22)); i22 += 8;
      const int16x8_t vk22x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi22x01234567), vget_low_s16(vk22x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi22x01234567), vget_high_s16(vk22x01234567));

      const int16x8_t vi23x01234567 = vmovl_s8(vld1_s8(i23)); i23 += 8;
      const int16x8_t vk23x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi23x01234567), vget_low_s16(vk23x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi23x01234567), vget_high_s16(vk23x01234567));

      const int16x8_t vi24x01234567 = vmovl_s8(vld1_s8(i24)); i24 += 8;
      const int16x8_t vk24x01234567 = vmovl_s8(vld1_s8(w)); w = (const void*) ((const int8_t*) w + 8);

      vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi24x01234567), vget_low_s16(vk24x01234567));
      vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi24x01234567), vget_high_s16(vk24x01234567));

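      // Requantize: convert the int32 accumulators to float, scale, then add
      // the "magic bias" so the rounded integer result lands in the low
      // mantissa bits; reinterpreting as int32 and subtracting
      // (magic_bias_as_int - output_zero_point) with saturation yields the
      // zero-point-adjusted quantized value.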
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

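      // Narrow with saturation: int32 -> int16 -> int8. AArch64 has
      // vqmovn_high_s32 to narrow directly into the upper half of the
      // vector; on AArch32 the two halves are narrowed separately and
      // combined.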
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);


      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));


      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

      vout01234567 = vmax_s8(vout01234567, voutput_min);

      vout01234567 = vmin_s8(vout01234567, voutput_max);

      vst1_s8(output, vout01234567); output += 8;
    }
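    // Remainder path: 1-7 trailing channels. Full 8-lane vectors are still
    // loaded from each input row and from the packed weights (the kernel is
    // declared XNN_OOB_READS, so reading past the valid channels is
    // permitted); only the final `c` bytes are stored.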
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vmovl_s8(vld1_s8(i0));
        const int16x8_t vk0x01234567 = vmovl_s8(vld1_s8(w));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vmovl_s8(vld1_s8(i1));
        const int16x8_t vk1x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 8)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vmovl_s8(vld1_s8(i2));
        const int16x8_t vk2x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 16)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vmovl_s8(vld1_s8(i3));
        const int16x8_t vk3x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 24)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vmovl_s8(vld1_s8(i4));
        const int16x8_t vk4x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 32)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vmovl_s8(vld1_s8(i5));
        const int16x8_t vk5x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 40)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vmovl_s8(vld1_s8(i6));
        const int16x8_t vk6x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 48)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vmovl_s8(vld1_s8(i7));
        const int16x8_t vk7x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 56)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vmovl_s8(vld1_s8(i8));
        const int16x8_t vk8x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 64)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));
        const int16x8_t vi9x01234567 = vmovl_s8(vld1_s8(i9));
        const int16x8_t vk9x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 72)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi9x01234567), vget_low_s16(vk9x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi9x01234567), vget_high_s16(vk9x01234567));
        const int16x8_t vi10x01234567 = vmovl_s8(vld1_s8(i10));
        const int16x8_t vk10x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 80)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi10x01234567), vget_low_s16(vk10x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi10x01234567), vget_high_s16(vk10x01234567));
        const int16x8_t vi11x01234567 = vmovl_s8(vld1_s8(i11));
        const int16x8_t vk11x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 88)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi11x01234567), vget_low_s16(vk11x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi11x01234567), vget_high_s16(vk11x01234567));
        const int16x8_t vi12x01234567 = vmovl_s8(vld1_s8(i12));
        const int16x8_t vk12x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 96)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi12x01234567), vget_low_s16(vk12x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi12x01234567), vget_high_s16(vk12x01234567));
        const int16x8_t vi13x01234567 = vmovl_s8(vld1_s8(i13));
        const int16x8_t vk13x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 104)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi13x01234567), vget_low_s16(vk13x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi13x01234567), vget_high_s16(vk13x01234567));
        const int16x8_t vi14x01234567 = vmovl_s8(vld1_s8(i14));
        const int16x8_t vk14x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 112)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi14x01234567), vget_low_s16(vk14x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi14x01234567), vget_high_s16(vk14x01234567));
        const int16x8_t vi15x01234567 = vmovl_s8(vld1_s8(i15));
        const int16x8_t vk15x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 120)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi15x01234567), vget_low_s16(vk15x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi15x01234567), vget_high_s16(vk15x01234567));
        const int16x8_t vi16x01234567 = vmovl_s8(vld1_s8(i16));
        const int16x8_t vk16x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 128)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi16x01234567), vget_low_s16(vk16x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi16x01234567), vget_high_s16(vk16x01234567));
        const int16x8_t vi17x01234567 = vmovl_s8(vld1_s8(i17));
        const int16x8_t vk17x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 136)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi17x01234567), vget_low_s16(vk17x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi17x01234567), vget_high_s16(vk17x01234567));
        const int16x8_t vi18x01234567 = vmovl_s8(vld1_s8(i18));
        const int16x8_t vk18x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 144)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi18x01234567), vget_low_s16(vk18x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi18x01234567), vget_high_s16(vk18x01234567));
        const int16x8_t vi19x01234567 = vmovl_s8(vld1_s8(i19));
        const int16x8_t vk19x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 152)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi19x01234567), vget_low_s16(vk19x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi19x01234567), vget_high_s16(vk19x01234567));
        const int16x8_t vi20x01234567 = vmovl_s8(vld1_s8(i20));
        const int16x8_t vk20x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 160)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi20x01234567), vget_low_s16(vk20x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi20x01234567), vget_high_s16(vk20x01234567));
        const int16x8_t vi21x01234567 = vmovl_s8(vld1_s8(i21));
        const int16x8_t vk21x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 168)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi21x01234567), vget_low_s16(vk21x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi21x01234567), vget_high_s16(vk21x01234567));
        const int16x8_t vi22x01234567 = vmovl_s8(vld1_s8(i22));
        const int16x8_t vk22x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 176)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi22x01234567), vget_low_s16(vk22x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi22x01234567), vget_high_s16(vk22x01234567));
        const int16x8_t vi23x01234567 = vmovl_s8(vld1_s8(i23));
        const int16x8_t vk23x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 184)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi23x01234567), vget_low_s16(vk23x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi23x01234567), vget_high_s16(vk23x01234567));
        const int16x8_t vi24x01234567 = vmovl_s8(vld1_s8(i24));
        const int16x8_t vk24x01234567 = vmovl_s8(vld1_s8((const void*) ((const int8_t*) w + 192)));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi24x01234567), vget_low_s16(vk24x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi24x01234567), vget_high_s16(vk24x01234567));

        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

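        // Store the valid tail: 4, then 2, then 1 byte(s) depending on the
        // low bits of c, rotating the vector with vext_s8 after each partial
        // store so the next lanes line up at index 0.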
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}