// Auto-generated file. Do not edit!
//   Template: src/qu8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


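// Computes a 6x16 tile of the output:
//   acc[n][c] = bias[c] + sum_k a[n][k] * (b[k][c] - kernel_zero_point)
// using Arm dot-product (UDOT) instructions over groups of 4 K bytes, then
// requantizes to uint8 with the rounding-to-nearest-up (rndnu) scheme.
// (The input zero point is assumed to be folded into the packed bias.)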
void xnn_qu8_igemm_minmax_rndnu_ukernel_6x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
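  // Each UDOT lane consumes 4 consecutive K bytes, so the packed weights are
  // laid out in groups of 4; rounding kc up keeps the remainder handling below
  // to a single 4-byte tail.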
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

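  // Despite the va_ prefix, this broadcasts the *kernel* (filter) zero point;
  // it is dotted with the activations to build the correction term that is
  // subtracted from the accumulators after the K loop.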
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc3x89AB = vpacc0x89AB;
    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x4_t vpacc4x89AB = vpacc0x89AB;
    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc5x0123 = vpacc0x0123;
    uint32x4_t vpacc5x4567 = vpacc0x4567;
    uint32x4_t vpacc5x89AB = vpacc0x89AB;
    uint32x4_t vpacc5xCDEF = vpacc0xCDEF;
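    // vnaccN accumulates kernel_zero_point * sum_k(a[n][k]); subtracting it
    // later makes the products behave as a[n][k] * (b[k][c] - kernel_zero_point).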
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);
    uint32x2_t vnacc5 = vmov_n_u32(0);

    size_t p = ks;
    do {
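      // The indirection buffer supplies one activation pointer per row; rows
      // that point at the shared `zero` padding buffer must not be offset.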
      const uint8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
      }
      const uint8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
      }
      const uint8_t* restrict a4 = a[4];
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
      }
      const uint8_t* restrict a5 = a[5];
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

      // Inner accumulation loop over K for this group of 16 columns.
      size_t k = kc;
      // 2x partially unrolled loop that consumes 8 bytes of K (two groups of 4) per iteration.
      while (k >= 8 * sizeof(uint8_t)) {
        // Load a 6x8 block of activations.
        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
        const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;

        // Load an 8x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 6x8 * 8x16 --> 6x16.
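        // vdotq_lane_u32(acc, vb, va, l) updates each 32-bit lane i as
        // acc[i] += dot(vb[4i..4i+3], va[4l..4l+3]): one UDOT covers 4 columns
        // x 4 K bytes; lane 0 uses K bytes 0-3 and lane 1 uses K bytes 4-7.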
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
        vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);
        vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb4567x89AB, va5x01234567, 1);
        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb4567xCDEF, va5x01234567, 1);

        k -= 8 * sizeof(uint8_t);
      }
      // Handle the final 4 bytes of `k` (`kc` is rounded up to a multiple of 4,
      // so any remainder left by the 8-byte loop is exactly 4 bytes).
      if XNN_UNLIKELY(k != 0) {
        // Load a 6x4 block of activations.
        const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
        const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
        const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
        const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
        const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
        const uint8x8_t va5x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a5, vmov_n_u32(0), 0)); a5 += 4;

        // Load a 4x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 6x4 * 4x16 --> 6x16.
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
        vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
        vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
        vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
        vpacc5x89AB = vdotq_lane_u32(vpacc5x89AB, vb0123x89AB, va5x01234567, 0);
        vpacc5xCDEF = vdotq_lane_u32(vpacc5xCDEF, vb0123xCDEF, va5x01234567, 0);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

    // Subtract zero point from accumulators.
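    // vpadd folds the two halves of vnaccN into the row's total correction,
    // which vcombine then replicates across all four column lanes.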
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x0123));
    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4x0123));
    vnacc5 = vpadd_u32(vnacc5, vnacc5);
    const uint32x4_t vnacc5x0123 = vcombine_u32(vnacc5, vnacc5);
    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x0123));
    int32x4_t vacc5x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc5x89AB, vnacc5x0123));
    int32x4_t vacc5xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc5xCDEF, vnacc5x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
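    // rndnu requantization: an arithmetic right pre-shift, a saturating
    // doubling high multiply by the fixed-point multiplier, then a rounding
    // right post-shift. Both shift vectors are expected to hold non-positive
    // values, so vshlq_s32/vrshlq_s32 shift right.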

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
    vacc5x89AB = vshlq_s32(vacc5x89AB, vright_pre_shift);
    vacc5xCDEF = vshlq_s32(vacc5xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc5x89AB = vqdmulhq_s32(vacc5x89AB, vmultiplier);
    vacc5xCDEF = vqdmulhq_s32(vacc5xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
    vacc5x89AB = vrshlq_s32(vacc5x89AB, vright_post_shift);
    vacc5xCDEF = vrshlq_s32(vacc5xCDEF, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
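    // AArch64: vqmovn_high_s32/vqmovun_high_s16 narrow directly into the upper
    // half of a vector, avoiding the explicit vcombine of the generic path below.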
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x89AB), vacc5xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
    uint8x16_t vout5x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc5x01234567), vacc5x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc5x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x89AB), vqmovn_s32(vacc5xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
    uint8x16_t vout5x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc5x01234567), vqmovun_s16(vacc5x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);
    vout5x0123456789ABCDEF = vmaxq_u8(vout5x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);
    vout5x0123456789ABCDEF = vminq_u8(vout5x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      vst1q_u8(c5 + 0, vout5x0123456789ABCDEF);
      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);

      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const uint8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
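      // Partial tile: pack the low 8 bytes of adjacent row pairs into single
      // q-registers, then peel off 8-, 4-, 2- and 1-byte stores per row as the
      // bits of nc dictate, shifting the remaining bytes down with vext.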
      uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vget_low_u8(vout4x0123456789ABCDEF), vget_low_u8(vout5x0123456789ABCDEF));
      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_u8(c5, vget_high_u8(vout4x01234567_5x01234567)); c5 += 8;
        vst1_u8(c4, vget_low_u8(vout4x01234567_5x01234567)); c4 += 8;
        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vout4x01234567_5x01234567 = vcombine_u8(vget_high_u8(vout4x0123456789ABCDEF), vget_high_u8(vout5x0123456789ABCDEF));
        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}