// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

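  // Rows beyond mr alias the previous row's A and C pointers, so the kernel
  // always computes a full 4-row tile: the extra loads stay within valid
  // memory and the extra stores simply rewrite the previous row's output.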
  do {
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

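    // Each (row, column) pair keeps its own int32x4 accumulator: the column
    // bias occupies lane 0 and the remaining lanes start at zero, so summing
    // the four lanes in the reduction below counts the bias exactly once.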
    size_t k = kc;
    // 2x partial unrolled loop to load 16 bytes at a time using MLA.
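    // For every 16 bytes of K: vmull_s8 widens the first int8x8 product to
    // int16x8, vmlal_s8 adds the second block's product, and vpadalq_s16
    // folds adjacent int16 pairs into the int32 accumulators. Packed weights
    // are read as 8 bytes per output column for each 8-element block of K.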
    while (k >= 16 * sizeof(int8_t)) {
      const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
      const int8x8_t va3x1 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
      int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0);
      int16x8_t vprod2x0 = vmull_s8(vb0x0, va2x0);
      int16x8_t vprod3x0 = vmull_s8(vb0x0, va3x0);
      vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
      vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1);
      vprod2x0 = vmlal_s8(vprod2x0, vb0x1, va2x1);
      vprod3x0 = vmlal_s8(vprod3x0, vb0x1, va3x1);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
      const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
      int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0);
      int16x8_t vprod2x1 = vmull_s8(vb1x0, va2x0);
      int16x8_t vprod3x1 = vmull_s8(vb1x0, va3x0);
      vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
      vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1);
      vprod2x1 = vmlal_s8(vprod2x1, vb1x1, va2x1);
      vprod3x1 = vmlal_s8(vprod3x1, vb1x1, va3x1);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
      const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
      int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0);
      int16x8_t vprod2x2 = vmull_s8(vb2x0, va2x0);
      int16x8_t vprod3x2 = vmull_s8(vb2x0, va3x0);
      vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
      vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1);
      vprod2x2 = vmlal_s8(vprod2x2, vb2x1, va2x1);
      vprod3x2 = vmlal_s8(vprod3x2, vb2x1, va3x1);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
      const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
      int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0);
      int16x8_t vprod2x3 = vmull_s8(vb3x0, va2x0);
      int16x8_t vprod3x3 = vmull_s8(vb3x0, va3x0);
      vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
      vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1);
      vprod2x3 = vmlal_s8(vprod2x3, vb3x1, va2x1);
      vprod3x3 = vmlal_s8(vprod3x3, vb3x1, va3x1);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
      const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
      int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0);
      int16x8_t vprod2x4 = vmull_s8(vb4x0, va2x0);
      int16x8_t vprod3x4 = vmull_s8(vb4x0, va3x0);
      vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
      vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1);
      vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1);
      vprod3x4 = vmlal_s8(vprod3x4, vb4x1, va3x1);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
      const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
      int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0);
      int16x8_t vprod2x5 = vmull_s8(vb5x0, va2x0);
      int16x8_t vprod3x5 = vmull_s8(vb5x0, va3x0);
      vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
      vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1);
      vprod2x5 = vmlal_s8(vprod2x5, vb5x1, va2x1);
      vprod3x5 = vmlal_s8(vprod3x5, vb5x1, va3x1);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
      const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
      int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0);
      int16x8_t vprod2x6 = vmull_s8(vb6x0, va2x0);
      int16x8_t vprod3x6 = vmull_s8(vb6x0, va3x0);
      vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
      vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1);
      vprod2x6 = vmlal_s8(vprod2x6, vb6x1, va2x1);
      vprod3x6 = vmlal_s8(vprod3x6, vb6x1, va3x1);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
      const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
      int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0);
      int16x8_t vprod2x7 = vmull_s8(vb7x0, va2x0);
      int16x8_t vprod3x7 = vmull_s8(vb7x0, va3x0);
      vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
      vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1);
      vprod2x7 = vmlal_s8(vprod2x7, vb7x1, va2x1);
      vprod3x7 = vmlal_s8(vprod3x7, vb7x1, va3x1);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
      vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

      k -= 16 * sizeof(int8_t);
    }

    // Handle 8 bytes at a time using MUL.
    if (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
      const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
      const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
      const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
      const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
      const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
      const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
      const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
      const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
      vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

      k -= 8 * sizeof(int8_t);
    }

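    // Reduce the eight per-column accumulators of each row to two int32x4
    // vectors (columns 0-3 and 4-7). AArch64 uses vpaddq_s32 directly; the
    // AArch32 path builds the same result from vadd/vpadd/vcombine.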
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

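    // rndnu requantization: a saturating pre-shift (vqshlq_s32), a doubling
    // high-half multiply by the fixed-point multiplier (vqdmulhq_s32), and a
    // rounding post-shift (vrshlq_s32). The shift counts are expected to be
    // non-positive, so these act as right shifts; the zero point is added and
    // the result is saturated to int8 below.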
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

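    // Store 8 int8 outputs per row. When fewer than 8 columns remain, the
    // tail is written in 4-, 2-, and 1-byte pieces, rotating the output
    // vectors with vextq_s8 between the partial stores.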
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}