// Auto-generated file. Do not edit!
//   Template: src/qu8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


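// Per XNNPACK's kernel naming convention, this ukernel computes a 5x16 tile
// of a QU8 (uint8, asymmetric quantization) indirect GEMM: MR=5 output rows,
// NR=16 output columns, with K consumed 4 bytes at a time ("c4") by the
// ARMv8.2 dot-product (UDOT) instructions that the "neondot" suffix requires.
// "rndnu" names the requantization scheme applied after accumulation
// (pre-shift, saturating doubling high multiply, rounding post-shift) and
// "minmax" the fused output clamp.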
void xnn_qu8_igemm_minmax_rndnu_ukernel_5x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const uint8_t** restrict a,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const uint8_t* zero,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (5 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

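  // K is consumed 4 bytes at a time by UDOT, so round kc up to a multiple of
  // 4; the packed weights are assumed to be zero-padded to match.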
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
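  // Set up the 5 output-row pointers. When mr < 5, the trailing pointers
  // alias the preceding row, so the extra rows are still computed but their
  // stores land on valid rows instead of running past the output buffer.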
  uint8_t* c0 = c;
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }

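  // Broadcast the kernel zero point. Weights are used as raw uint8, so
  // sum(a * w) overshoots the zero-point-corrected result by
  // kernel_zero_point * sum(a); that term is accumulated in the vnacc
  // registers below and subtracted after the K loops.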
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc3x89AB = vpacc0x89AB;
    uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x4_t vpacc4x89AB = vpacc0x89AB;
    uint32x4_t vpacc4xCDEF = vpacc0xCDEF;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);

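    // Outer loop over the indirection buffer: each iteration consumes one
    // group of 5 activation-row pointers and kc bytes of K; p counts down
    // the ks bytes of pointers in steps of 5 * sizeof(void*).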
    size_t p = ks;
    do {
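      // A row pointer equal to `zero` selects the zero-padding buffer and
      // must not be displaced by a_offset.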
      const uint8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
      }
      const uint8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
      }
      const uint8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
      }
      const uint8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
      }
      const uint8_t* restrict a4 = a[4];
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
      }
      a += 5;

      // Inner accumulation loop over K, producing all 16 output columns.
      size_t k = kc;
      // Loop partially unrolled 2x to load 8 bytes (two K groups of 4) at a time.
      while (k >= 8 * sizeof(uint8_t)) {
        // Load a 5x8 block of activations.
        const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
        const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
        const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
        const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
        const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;

        // Load an 8x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 5x8 * 8x16 --> 5x16.
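        // vdotq_lane_u32(acc, vb, va, l) adds to each 32-bit lane i of acc
        // the dot product of weight bytes vb[4i..4i+3] with the 4 activation
        // bytes in 32-bit lane l of va. vdot_u32 against the broadcast zero
        // point accumulates kernel_zero_point * sum(a) for the later correction.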
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
        vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb4567x89AB, va4x01234567, 1);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb4567xCDEF, va4x01234567, 1);

        k -= 8 * sizeof(uint8_t);
      }
      // Handle the final 4-byte remainder of `k`, if any.
      if XNN_UNLIKELY(k != 0) {
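        // kc was rounded up to a multiple of 4, so the remainder is exactly
        // 4 bytes per row, loaded into lane 0 of a zero-initialized vector.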
        // Load a 5x4 block of activations.
        const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
        const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
        const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
        const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
        const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;

        // Load a 4x16 block of weights.
        const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
        const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

        // Multiply-accumulate: 5x4 * 4x16 --> 5x16.
        vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
        vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
        vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
        vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
        vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
        vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
        vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
        vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
        vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
        vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
        vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
        vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
        vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
        vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
        vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
        vpacc4x89AB = vdotq_lane_u32(vpacc4x89AB, vb0123x89AB, va4x01234567, 0);
        vpacc4xCDEF = vdotq_lane_u32(vpacc4xCDEF, vb0123xCDEF, va4x01234567, 0);
      }
      p -= 5 * sizeof(void*);
    } while (p != 0);

    // Subtract the zero-point correction (kernel_zero_point * sum(a)) from the accumulators.
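    // Each 2-lane vnacc holds partial correction sums; vpadd folds them into
    // one total and vcombine broadcasts it across a 4-lane register so a
    // single vector subtract corrects each group of 4 columns.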
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
    int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
    int32x4_t vacc4x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc4x89AB, vnacc4x0123));
    int32x4_t vacc4xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc4xCDEF, vnacc4x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

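    // rndnu requantization: vshlq_s32 by the pre-shift (stored as a negative
    // amount) performs an arithmetic right shift, vqdmulhq_s32 takes the high
    // 32 bits of 2 * acc * multiplier (a Q31 fixed-point scale), and
    // vrshlq_s32 by the negative post-shift performs a rounding right shift.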
    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc4x89AB = vshlq_s32(vacc4x89AB, vright_pre_shift);
    vacc4xCDEF = vshlq_s32(vacc4xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc4x89AB = vqdmulhq_s32(vacc4x89AB, vmultiplier);
    vacc4xCDEF = vqdmulhq_s32(vacc4xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc4x89AB = vrshlq_s32(vacc4x89AB, vright_post_shift);
    vacc4xCDEF = vrshlq_s32(vacc4xCDEF, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
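    // Narrow 32 -> 16 bits with signed saturation, add the output zero point
    // in 16 bits, then narrow 16 -> 8 bits with unsigned saturation. The
    // AArch64 path uses the *_high forms to fill the upper half of a
    // q-register without a separate vcombine.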
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x89AB), vacc4xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
    uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
    uint8x16_t vout4x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc4x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc4x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x89AB), vqmovn_s32(vacc4xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
    uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
    uint8x16_t vout4x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc4x89ABCDEF));
#endif
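    // Clamp to the operator's output range [output_min, output_max].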
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
    vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
    vout4x0123456789ABCDEF = vmaxq_u8(vout4x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
    vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
    vout4x0123456789ABCDEF = vminq_u8(vout4x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      vst1q_u8(c4 + 0, vout4x0123456789ABCDEF);
      vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);

      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);

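      // Rewind the indirection buffer by the ks bytes of pointers the p-loop
      // consumed; the next 16-column block reuses the same activation rows.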
      a = (const uint8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
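      // Partial tile (nc < 16): pack rows pairwise into q-registers and store
      // the remaining columns in power-of-two chunks (8, 4, 2, 1), rotating
      // consumed bytes out with vext after each store.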
      uint8x8_t vout4x01234567 = vget_low_u8(vout4x0123456789ABCDEF);
      uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_u8(c4, vout4x01234567); c4 += 8;
        vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vout4x01234567 = vget_high_u8(vout4x0123456789ABCDEF);
        vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c4, vreinterpret_u32_u8(vout4x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c4, vreinterpret_u16_u8(vout4x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_u8(c4, vout4x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}