// Auto-generated file. Do not edit!
//   Template: src/qu8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_rndnu_ukernel_6x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

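  // The kernel zero point is broadcast across an 8-byte vector so that a
  // single UDOT per 8 activation bytes can accumulate kernel_zero_point * sum(a)
  // (the vnacc* correction terms used further below).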
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc3x89AB = vpacc0x89AB;
    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x4_t vpacc4x89AB = vpacc0x89AB;
    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc5x0123 = vpacc0x0123;
    uint32x4_t vpacc5x4567 = vpacc0x4567;
    uint32x4_t vpacc5x89AB = vpacc0x89AB;
    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
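    // The vpaccMx* registers accumulate the raw unsigned dot products
    // sum(a * b) for row M, while the vnaccM registers below accumulate
    // kernel_zero_point * sum(a) for row M. Subtracting the latter from the
    // former later yields the signed sum(a * (b - kernel_zero_point)).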
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);
    uint32x2_t vnacc5 = vmov_n_u32(0);

    // Inner accumulation loop along the KC (reduction) dimension.
    size_t k = kc;
    // 2x partially unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 6x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;

      // Load an 8x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 6x8 * 8x16 --> 6x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
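    // kc was rounded up to a multiple of 4 above, so the remainder is exactly
    // one group of 4 bytes per row, loaded into the low 32-bit lane.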
    if XNN_UNLIKELY(k != 0) {
      // Load a 6x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
      const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
      const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
      const uint8x8_t va5x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a5, vmov_n_u32(0), 0)); a5 += 4;

      // Load a 4x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 6x4 * 4x16 --> 6x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
      vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
      vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
      vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
      vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
    }

    // Subtract zero point from accumulators.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x0123));
    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4x0123));
    vnacc5 = vpadd_u32(vnacc5, vnacc5);
    const uint32x4_t vnacc5x0123 = vcombine_u32(vnacc5, vnacc5);
    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x0123));
    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x0123));
    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

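    // "rndnu" requantization: an arithmetic shift by right_pre_shift (VSHL
    // shifts right when the per-lane count is negative), a Q31 fixed-point
    // multiply via the saturating doubling high-half multiply, and a rounding
    // arithmetic shift by right_post_shift.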
    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);

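    // Convert to 8-bit: saturate to int16, add the output zero point, then
    // narrow to uint8 with unsigned saturation. On AArch64 the *_high_*
    // intrinsics narrow directly into the upper half of a vector; elsewhere
    // the halves are narrowed separately and combined.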
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);

      nc -= 16;
    } else {
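      // Fewer than 16 columns remain: pack pairs of rows into one 128-bit
      // register each and store the tail in 8-, 4-, 2- and 1-byte pieces.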
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
      if (nc & 8) {
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}