// xref: /aosp_15_r20/external/XNNPACK/src/qs8-gemm/gen/4x8-minmax-rndnu-neon-mull-addw-dup.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/neon-mull-addw-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>

// QS8 GEMM microkernel, 4 rows (MR) x 8 columns (NR), NEON.
// Strategy: widening 8-bit multiply (vmull_s8) of a weight octet against a
// broadcast A lane (vdup_lane_s8), accumulated into int32 via vaddw_s16.
// Requantization: "rndnu" — saturating pre-shift, doubling high multiply,
// rounding post-shift, add zero point, clamp, narrow to int8.
//
// mr        - number of A/C rows actually used (1..4); unused rows alias row mr-1
// nc        - number of output columns remaining
// kc        - K extent in bytes (int8 elements) per A row
// a, a_stride - activations and row stride in bytes
// w         - packed weights: per 8-column group, 8 int32 biases followed by
//             K octets of int8 weights (K padded per packing; XNN_OOB_READS
//             marks the deliberate over-read of A in the remainder path)
// c, cm_stride, cn_stride - output, row stride, column-group stride (bytes)
// params    - rndnu_neon requantization constants
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8__neon_mull_addw_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup: rows beyond mr alias the previous row, so a single
  // 4-row code path safely handles any mr in 1..4 (extra rows recompute and
  // overwrite the same data).
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
    // Initialize all four rows' accumulators from the packed per-column bias.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    // Main K loop, unrolled 8 deep: one weight octet per K step, broadcast
    // the matching A lane for each of the 4 rows.
    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
      const int16x8_t vprod1x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va1, 0));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c0));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c0));
      const int16x8_t vprod2x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va2, 0));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c0));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c0));
      const int16x8_t vprod3x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va3, 0));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c0));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c0));
      const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
      const int16x8_t vprod1x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va1, 1));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c1));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c1));
      const int16x8_t vprod2x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va2, 1));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c1));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c1));
      const int16x8_t vprod3x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va3, 1));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c1));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c1));
      const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
      const int16x8_t vprod1x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va1, 2));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c2));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c2));
      const int16x8_t vprod2x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va2, 2));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c2));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c2));
      const int16x8_t vprod3x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va3, 2));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c2));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c2));
      const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
      const int16x8_t vprod1x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va1, 3));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c3));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c3));
      const int16x8_t vprod2x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va2, 3));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c3));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c3));
      const int16x8_t vprod3x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va3, 3));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c3));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c3));
      const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
      const int16x8_t vprod1x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va1, 4));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c4));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c4));
      const int16x8_t vprod2x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va2, 4));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c4));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c4));
      const int16x8_t vprod3x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va3, 4));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c4));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c4));
      const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
      const int16x8_t vprod1x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va1, 5));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c5));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c5));
      const int16x8_t vprod2x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va2, 5));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c5));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c5));
      const int16x8_t vprod3x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va3, 5));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c5));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c5));
      const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
      const int16x8_t vprod1x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va1, 6));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c6));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c6));
      const int16x8_t vprod2x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va2, 6));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c6));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c6));
      const int16x8_t vprod3x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va3, 6));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c6));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c6));
      const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va0, 7));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c7));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c7));
      const int16x8_t vprod1x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va1, 7));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c7));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c7));
      const int16x8_t vprod2x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va2, 7));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c7));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c7));
      const int16x8_t vprod3x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va3, 7));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c7));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c7));

      k -= 8 * sizeof(int8_t);
    }
    // K remainder (1..7 elements): vld1_s8 over-reads A past k bytes
    // (sanctioned by XNN_OOB_READS); only the first k lanes are consumed via
    // the nested lane selection below. Weights are padded, so w loads are safe.
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
      vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
      vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
      const int16x8_t vprod1x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va1, 0));
      vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c0));
      vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c0));
      const int16x8_t vprod2x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va2, 0));
      vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c0));
      vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c0));
      const int16x8_t vprod3x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va3, 0));
      vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c0));
      vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c0));

      if (k >= 2 * sizeof(int8_t)) {
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
        const int16x8_t vprod1x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va1, 1));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c1));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c1));
        const int16x8_t vprod2x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va2, 1));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c1));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c1));
        const int16x8_t vprod3x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va3, 1));
        vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c1));
        vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c1));

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
          vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
          vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
          const int16x8_t vprod1x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va1, 2));
          vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c2));
          vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c2));
          const int16x8_t vprod2x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va2, 2));
          vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c2));
          vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c2));
          const int16x8_t vprod3x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va3, 2));
          vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c2));
          vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c2));

          if (k >= 4 * sizeof(int8_t)) {
            const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
            vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
            vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
            const int16x8_t vprod1x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va1, 3));
            vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c3));
            vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c3));
            const int16x8_t vprod2x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va2, 3));
            vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c3));
            vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c3));
            const int16x8_t vprod3x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va3, 3));
            vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c3));
            vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c3));

            if (k > 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

              const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
              vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
              vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
              const int16x8_t vprod1x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va1, 4));
              vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c4));
              vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c4));
              const int16x8_t vprod2x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va2, 4));
              vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c4));
              vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c4));
              const int16x8_t vprod3x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va3, 4));
              vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c4));
              vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c4));

              if (k >= 6 * sizeof(int8_t)) {
                const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

                const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
                vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
                vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
                const int16x8_t vprod1x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va1, 5));
                vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c5));
                vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c5));
                const int16x8_t vprod2x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va2, 5));
                vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c5));
                vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c5));
                const int16x8_t vprod3x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va3, 5));
                vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c5));
                vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c5));

                if (k > 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

                  const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
                  vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
                  vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
                  const int16x8_t vprod1x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va1, 6));
                  vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c6));
                  vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c6));
                  const int16x8_t vprod2x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va2, 6));
                  vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c6));
                  vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c6));
                  const int16x8_t vprod3x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va3, 6));
                  vacc3x0123 = vaddw_s16(vacc3x0123, vget_low_s16(vprod3x01234567c6));
                  vacc3x4567 = vaddw_s16(vacc3x4567, vget_high_s16(vprod3x01234567c6));
                }
              }
            }
          }
        }
      }
    }

    // Post-accumulation work: rndnu requantization of the int32 accumulators.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    // Saturating pre-shift.
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    // Saturating doubling multiply-high by the fixed-point multiplier.
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    // Rounding post-shift.
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow to int16, add output zero point, then narrow to int8 with
    // saturation. AArch64 uses the *_high forms to avoid extra vcombine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Main case where the 8 columns fit in the destination.
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      // Advance to the next 8 columns.
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      // Rewind A pointers to the start of each row for the next column group.
      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination:
      // store 4/2/1 bytes per row, shifting consumed lanes out with vext.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}
423