// Auto-generated file. Do not edit!
//   Template: src/qu8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_rndnu_ukernel_4x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

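  // Note: the packed weights pad the K dimension to a multiple of 4 to match
  // the 4-element groups consumed by each dot-product instruction, so kc is
  // rounded up to agree with that padding.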
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
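  // When fewer than 4 rows remain, alias the out-of-range row pointers to the
  // row above: the duplicated rows recompute the same values and store them to
  // the same addresses, which keeps the main path branch-free.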
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

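  // With a per-tensor kernel zero point bz, each output element is
  //   sum_k a[k] * (b[k] - bz) = sum_k a[k]*b[k] - bz * sum_k a[k].
  // The unsigned dot products below accumulate the first term in the vpacc*
  // registers and bz * sum_k a[k] in the vnacc* registers; the correction is
  // subtracted once all of K has been processed.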
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc3x89AB = vpacc0x89AB;
    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);

    // Inner accumulation loop along the K dimension for the current group of
    // 16 columns.
    size_t k = kc;
    // Partially unrolled (2x) loop that loads 8 activation bytes per row at a
    // time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 4x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;

      // Load an 8x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 4x8 * 8x16 --> 4x16.
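      // Each vb* register packs 4 consecutive K values for 4 output columns,
      // one column per 32-bit lane. vdotq_lane_u32 adds, per lane, the dot
      // product of those 4 weight bytes with 4 activation bytes (lane 0 picks
      // activations 0..3, lane 1 picks activations 4..7), so each instruction
      // performs 16 multiply-adds.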
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle the final positions of `k`: kc is padded to a multiple of 4, so
    // whenever k != 0 exactly 4 bytes per row remain.
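    // Each row's last 4 bytes are loaded into lane 0 of a zeroed vector, so
    // the zeroed upper half contributes nothing to the dot products below.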
    if XNN_UNLIKELY(k != 0) {
      // Load a 4x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
      const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;

      // Load a 4x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
    }

    // Subtract the kernel-zero-point correction from the accumulators.
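    // vpadd folds the two 4-element-group partial sums in each vnacc pair
    // into one per-row total, and vcombine broadcasts that total across all
    // four 32-bit lanes so a single subtraction corrects each accumulator.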
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));

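    // Requantize with the rndnu scheme: an arithmetic right pre-shift (vshlq
    // with a negative shift count), a saturating doubling multiply-high
    // against a Q31 multiplier (vqdmulhq_s32 yields (2*a*m) >> 32), and a
    // rounding right post-shift (vrshlq with a negative shift count).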
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);

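    // Narrow 32 -> 16 bits with signed saturation, add the output zero point,
    // then narrow 16 -> 8 bits with unsigned saturation; on AArch64 the
    // *_high forms narrow directly into the upper half of one register.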
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);

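    // Main store path: write a full 16-byte row vector per output row, step
    // the output pointers by cn_stride, and rewind the activation pointers by
    // kc to reuse the same rows for the next group of 16 columns.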
    if (nc >= 16) {
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);

      nc -= 16;
    } else {
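      // Tail store for nc < 16: pack the low halves of two rows into a single
      // register (row 0 with row 1, row 2 with row 3), then peel off 8, 4, 2,
      // and 1 columns, rotating the remaining bytes down with vextq_u8 after
      // each partial store.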
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
      if (nc & 8) {
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}