// Auto-generated file. Do not edit!
//   Template: src/qu8-dwconv/unipass-neon-mul8.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>


void xnn_qu8_dwconv_minmax_rndnu_ukernel_up32x9__neon_mul8(
    size_t channels,
    size_t output_width,
    const uint8_t** input,
    const void* weights,
    uint8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const uint8x8_t vkernel_zero_point = vld1_dup_u8(params->rndnu_neon.kernel_zero_point);
  const uint16x8_t vkernel_zero_point16 = vmovl_u8(vkernel_zero_point);
  const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
  do {
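    // Gather the 9 input row pointers for this output pixel; rows that point at the
    // zero buffer are used as-is, real rows are adjusted by input_offset.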
    const uint8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
    }
    const uint8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
    }
    const uint8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
    }
    const uint8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
    }
    const uint8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
    }
    const uint8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
    }
    const uint8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
    }
    const uint8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
    }
    const uint8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const uint8_t**) ((uintptr_t) input + input_stride);


    size_t c = channels;
    const void* w = weights;
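    // Main loop: 32 channels per iteration, accumulated across all 9 taps in eight int32x4 accumulators.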
    for (; c >= 32; c -= 32) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccGHIJ = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccKLMN = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccOPQR = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vaccSTUV = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);


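      // Tap 0: widening unsigned 8-bit multiplies, accumulated into the 32-bit accumulators.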
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi0x89ABCDEF = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi0xGHIJKLMN = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi0xOPQRSTUV = vld1_u8(i0); i0 += 8;
      const uint8x8_t vk0xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      uint16x8_t vprod01234567 = vmull_u8(vi0x01234567, vk0x01234567);
      uint16x8_t vprod89ABCDEF = vmull_u8(vi0x89ABCDEF, vk0x89ABCDEF);
      uint16x8_t vprodGHIJKLMN = vmull_u8(vi0xGHIJKLMN, vk0xGHIJKLMN);
      uint16x8_t vprodOPQRSTUV = vmull_u8(vi0xOPQRSTUV, vk0xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi1x89ABCDEF = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi1xGHIJKLMN = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi1xOPQRSTUV = vld1_u8(i1); i1 += 8;
      const uint8x8_t vk1xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

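      // Taps 1-8 repeat the same pattern; the running input sums (vsum*) feed the
      // kernel zero-point correction applied after the last tap.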
      vprod01234567 = vmull_u8(vi1x01234567, vk1x01234567);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);
      vprod89ABCDEF = vmull_u8(vi1x89ABCDEF, vk1x89ABCDEF);
      uint16x8_t vsum89ABCDEF = vaddl_u8(vi0x89ABCDEF, vi1x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi1xGHIJKLMN, vk1xGHIJKLMN);
      uint16x8_t vsumGHIJKLMN = vaddl_u8(vi0xGHIJKLMN, vi1xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi1xOPQRSTUV, vk1xOPQRSTUV);
      uint16x8_t vsumOPQRSTUV = vaddl_u8(vi0xOPQRSTUV, vi1xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi2x89ABCDEF = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi2xGHIJKLMN = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi2xOPQRSTUV = vld1_u8(i2); i2 += 8;
      const uint8x8_t vk2xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi2x01234567, vk2x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      vprod89ABCDEF = vmull_u8(vi2x89ABCDEF, vk2x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi2x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi2xGHIJKLMN, vk2xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi2xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi2xOPQRSTUV, vk2xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi2xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi3x89ABCDEF = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi3xGHIJKLMN = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi3xOPQRSTUV = vld1_u8(i3); i3 += 8;
      const uint8x8_t vk3xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi3x01234567, vk3x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      vprod89ABCDEF = vmull_u8(vi3x89ABCDEF, vk3x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi3x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi3xGHIJKLMN, vk3xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi3xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi3xOPQRSTUV, vk3xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi3xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi4x89ABCDEF = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi4xGHIJKLMN = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi4xOPQRSTUV = vld1_u8(i4); i4 += 8;
      const uint8x8_t vk4xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi4x01234567, vk4x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      vprod89ABCDEF = vmull_u8(vi4x89ABCDEF, vk4x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi4x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi4xGHIJKLMN, vk4xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi4xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi4xOPQRSTUV, vk4xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi4xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi5x89ABCDEF = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi5xGHIJKLMN = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi5xOPQRSTUV = vld1_u8(i5); i5 += 8;
      const uint8x8_t vk5xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi5x01234567, vk5x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      vprod89ABCDEF = vmull_u8(vi5x89ABCDEF, vk5x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi5x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi5xGHIJKLMN, vk5xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi5xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi5xOPQRSTUV, vk5xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi5xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi6x89ABCDEF = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi6xGHIJKLMN = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi6xOPQRSTUV = vld1_u8(i6); i6 += 8;
      const uint8x8_t vk6xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi6x01234567, vk6x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);
      vprod89ABCDEF = vmull_u8(vi6x89ABCDEF, vk6x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi6x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi6xGHIJKLMN, vk6xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi6xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi6xOPQRSTUV, vk6xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi6xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi7x01234567 = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi7x89ABCDEF = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi7xGHIJKLMN = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi7xOPQRSTUV = vld1_u8(i7); i7 += 8;
      const uint8x8_t vk7xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi7x01234567, vk7x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi7x01234567);
      vprod89ABCDEF = vmull_u8(vi7x89ABCDEF, vk7x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi7x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi7xGHIJKLMN, vk7xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi7xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi7xOPQRSTUV, vk7xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi7xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));
      const uint8x8_t vi8x01234567 = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8x01234567 = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi8x89ABCDEF = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8x89ABCDEF = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi8xGHIJKLMN = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8xGHIJKLMN = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);
      const uint8x8_t vi8xOPQRSTUV = vld1_u8(i8); i8 += 8;
      const uint8x8_t vk8xOPQRSTUV = vld1_u8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_u8(vi8x01234567, vk8x01234567);
      vsum01234567 = vaddw_u8(vsum01234567, vi8x01234567);
      vprod89ABCDEF = vmull_u8(vi8x89ABCDEF, vk8x89ABCDEF);
      vsum89ABCDEF = vaddw_u8(vsum89ABCDEF, vi8x89ABCDEF);
      vprodGHIJKLMN = vmull_u8(vi8xGHIJKLMN, vk8xGHIJKLMN);
      vsumGHIJKLMN = vaddw_u8(vsumGHIJKLMN, vi8xGHIJKLMN);
      vprodOPQRSTUV = vmull_u8(vi8xOPQRSTUV, vk8xOPQRSTUV);
      vsumOPQRSTUV = vaddw_u8(vsumOPQRSTUV, vi8xOPQRSTUV);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vprod01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vprod01234567)));
      vacc89AB = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vprod89ABCDEF)));
      vaccCDEF = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vprod89ABCDEF)));
      vaccGHIJ = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vprodGHIJKLMN)));
      vaccKLMN = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vprodGHIJKLMN)));
      vaccOPQR = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vprodOPQRSTUV)));
      vaccSTUV = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vprodOPQRSTUV)));

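      // Compensate for the unsigned kernel encoding: acc -= sum(inputs) * kernel_zero_point.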
      vacc0123 = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567), vget_low_u16(vkernel_zero_point16)));
      vacc4567 = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567), vget_high_u16(vkernel_zero_point16)));
      vacc89AB = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vacc89AB), vget_low_u16(vsum89ABCDEF), vget_low_u16(vkernel_zero_point16)));
      vaccCDEF = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccCDEF), vget_high_u16(vsum89ABCDEF), vget_high_u16(vkernel_zero_point16)));
      vaccGHIJ = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccGHIJ), vget_low_u16(vsumGHIJKLMN), vget_low_u16(vkernel_zero_point16)));
      vaccKLMN = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccKLMN), vget_high_u16(vsumGHIJKLMN), vget_high_u16(vkernel_zero_point16)));
      vaccOPQR = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccOPQR), vget_low_u16(vsumOPQRSTUV), vget_low_u16(vkernel_zero_point16)));
      vaccSTUV = vreinterpretq_s32_u32(vmlsl_u16(vreinterpretq_u32_s32(vaccSTUV), vget_high_u16(vsumOPQRSTUV), vget_high_u16(vkernel_zero_point16)));

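      // Requantize (rndnu): pre-shift, fixed-point multiply (vqdmulhq_s32), rounding post-shift.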
      vacc0123 = vshlq_s32(vacc0123, vright_pre_shift);
      vacc4567 = vshlq_s32(vacc4567, vright_pre_shift);
      vacc89AB = vshlq_s32(vacc89AB, vright_pre_shift);
      vaccCDEF = vshlq_s32(vaccCDEF, vright_pre_shift);
      vaccGHIJ = vshlq_s32(vaccGHIJ, vright_pre_shift);
      vaccKLMN = vshlq_s32(vaccKLMN, vright_pre_shift);
      vaccOPQR = vshlq_s32(vaccOPQR, vright_pre_shift);
      vaccSTUV = vshlq_s32(vaccSTUV, vright_pre_shift);

      vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
      vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
      vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
      vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
      vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
      vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
      vaccOPQR = vqdmulhq_s32(vaccOPQR, vmultiplier);
      vaccSTUV = vqdmulhq_s32(vaccSTUV, vmultiplier);

      vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
      vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);
      vacc89AB = vrshlq_s32(vacc89AB, vright_post_shift);
      vaccCDEF = vrshlq_s32(vaccCDEF, vright_post_shift);
      vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_post_shift);
      vaccKLMN = vrshlq_s32(vaccKLMN, vright_post_shift);
      vaccOPQR = vrshlq_s32(vaccOPQR, vright_post_shift);
      vaccSTUV = vrshlq_s32(vaccSTUV, vright_post_shift);

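      // Narrow to 16 bits, add the output zero point, then saturate to unsigned 8 bits.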
#if XNN_ARCH_ARM64
      const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF), voutput_zero_point);
      const int16x8_t vaccGHIJKLMN = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN), voutput_zero_point);
      const int16x8_t vaccOPQRSTUV = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV), voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
      uint8x16_t voutGHIJKLMNOPQRSTUV = vqmovun_high_s16(vqmovun_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else
      const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
      const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
      const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
      const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);

      uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
      uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
#endif

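      // Clamp to the output activation range and store 32 outputs.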
      vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
      voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);

      vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
      voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);

      vst1q_u8(output, vout0123456789ABCDEF); output += 16;
      vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
    }
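    // Remainder: process the last 1-31 channels 8 at a time, subtracting the kernel
    // zero point from the weights and using 16-bit multiply-accumulates.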
    if XNN_UNLIKELY(c != 0) {
      const uint8_t* k = (const uint8_t*) ((const int32_t*) w + 32);
      do {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int16x8_t vi0x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i0))); i0 += 8;
        const int16x8_t vk0x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8(k), vkernel_zero_point)); k += 8;

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi0x01234567), vget_low_s16(vk0x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi0x01234567), vget_high_s16(vk0x01234567));
        const int16x8_t vi1x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i1))); i1 += 8;
        const int16x8_t vk1x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 24)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi1x01234567), vget_low_s16(vk1x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi1x01234567), vget_high_s16(vk1x01234567));
        const int16x8_t vi2x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i2))); i2 += 8;
        const int16x8_t vk2x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 56)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi2x01234567), vget_low_s16(vk2x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi2x01234567), vget_high_s16(vk2x01234567));
        const int16x8_t vi3x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i3))); i3 += 8;
        const int16x8_t vk3x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 88)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi3x01234567), vget_low_s16(vk3x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi3x01234567), vget_high_s16(vk3x01234567));
        const int16x8_t vi4x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i4))); i4 += 8;
        const int16x8_t vk4x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 120)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi4x01234567), vget_low_s16(vk4x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi4x01234567), vget_high_s16(vk4x01234567));
        const int16x8_t vi5x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i5))); i5 += 8;
        const int16x8_t vk5x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 152)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi5x01234567), vget_low_s16(vk5x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi5x01234567), vget_high_s16(vk5x01234567));
        const int16x8_t vi6x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i6))); i6 += 8;
        const int16x8_t vk6x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 184)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi6x01234567), vget_low_s16(vk6x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi6x01234567), vget_high_s16(vk6x01234567));
        const int16x8_t vi7x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i7))); i7 += 8;
        const int16x8_t vk7x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 216)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi7x01234567), vget_low_s16(vk7x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi7x01234567), vget_high_s16(vk7x01234567));
        const int16x8_t vi8x01234567 = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(i8))); i8 += 8;
        const int16x8_t vk8x01234567 = vreinterpretq_s16_u16(vsubl_u8(vld1_u8((const void*) (k + 248)), vkernel_zero_point));

        vacc0123 = vmlal_s16(vacc0123, vget_low_s16(vi8x01234567), vget_low_s16(vk8x01234567));
        vacc4567 = vmlal_s16(vacc4567, vget_high_s16(vi8x01234567), vget_high_s16(vk8x01234567));

        vacc0123 = vrshlq_s32(vacc0123, vright_pre_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_pre_shift);

        vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
        vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);

        vacc0123 = vrshlq_s32(vacc0123, vright_post_shift);
        vacc4567 = vrshlq_s32(vacc4567, vright_post_shift);

#if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567), voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#else
        const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
        uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
#endif

        vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
        vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));

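        // Store a full group of 8 outputs, or write the final 1-7 outputs in 4/2/1-byte pieces.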
        if XNN_LIKELY(c >= 8) {
          vst1_u8(output, vout01234567); output += 8;
          c -= 8;
        } else {
          if (c & 4) {
            vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
            vout01234567 = vext_u8(vout01234567, vout01234567, 4);
          }
          if (c & 2) {
            vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
            vout01234567 = vext_u8(vout01234567, vout01234567, 2);
          }
          if (c & 1) {
            vst1_lane_u8(output, vout01234567, 0); output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (uint8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}