// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


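// QS8 IGEMM micro-kernel with rndnu requantization: computes a 4x8 output
// tile (MR=4 rows, NR=8 columns) of int8 results, using 8-bit NEON VMULL
// with 8-byte (C8) channel packing.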
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

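  // The C8 layout consumes the K dimension in groups of 8 bytes, so round
  // kc up to a multiple of 8 (XNN_OOB_READS permits the trailing over-read).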
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
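  // Set up one output row pointer per tile row; when mr < 4 the unused rows
  // alias the previous row, so their stores are harmless duplicates.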
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
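    // Initialize the 32 per-column accumulators from the bias values that
    // precede the packed weights; rows 1-3 start from the same biases as row 0.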
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t p = ks;
    do {
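      // Load the next 4 indirection pointers; the special `zero` pointer is
      // not offset, so padding taps read zeros instead of real activations.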
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;

      // Handle 8 bytes at a time using MUL.
      while (k != 0) {
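        // For each of the 8 output columns: VMULL 8 int8 activations by 8
        // int8 weights, then VPADAL the int16 products into 4 int32 partial sums.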
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
        const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
        const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
        const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
        const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
        const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
        const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
        const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
        const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
        vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

        k -= 8 * sizeof(int8_t);
      }

      p -= 4 * sizeof(void*);
    } while (p != 0);

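    // Reduce each per-column accumulator (4 partial int32 sums) to a single
    // int32, and gather the 8 column sums per row into two 4-lane vectors.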
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
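    // AArch32 lacks VPADDQ, so reduce with VADD of the low/high halves
    // followed by pairwise VPADD on 64-bit vectors.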
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

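    // Requantize with the rndnu scheme: a saturating shift by the (typically
    // right) pre-shift, a saturating doubling multiply returning the high 32
    // bits of the product, then a rounding shift by the post-shift.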
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

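    // Narrow 32 -> 16 bits with saturation, add the output zero point in
    // 16-bit arithmetic, then narrow 16 -> 8 bits with saturation.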
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

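    // Clamp the 8-bit results to the requested [output_min, output_max] range.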
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

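    // If at least 8 columns remain, store full 8-byte rows and rewind the
    // indirection buffer for the next column block; otherwise fall through
    // to the sub-vector remainder stores below.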
    if (nc >= 8) {
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
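      // Remainder columns: store 4-, 2-, and 1-byte pieces as indicated by
      // the low bits of nc, rotating the vectors (VEXT) after each partial store.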
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}