// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neon_mlal_ld1r(size_t mr,size_t nc,size_t kc,size_t ks,const int8_t ** restrict a,const void * restrict w,int8_t * restrict c,size_t cm_stride,size_t cn_stride,size_t a_offset,const int8_t * zero,const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])18 void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neon_mlal_ld1r(
19     size_t mr,
20     size_t nc,
21     size_t kc,
22     size_t ks,
23     const int8_t** restrict a,
24     const void* restrict w,
25     int8_t* restrict c,
26     size_t cm_stride,
27     size_t cn_stride,
28     size_t a_offset,
29     const int8_t* zero,
30     const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
31 {
32   assert(mr != 0);
33   assert(mr <= 4);
34   assert(nc != 0);
35   assert(kc != 0);
36   assert(ks != 0);
37   assert(ks % (4 * sizeof(void*)) == 0);
38   assert(a_offset % sizeof(int8_t) == 0);
39   assert(a != NULL);
40   assert(w != NULL);
41   assert(c != NULL);
42 
43   kc = round_up_po2(kc, 4 * sizeof(int8_t));
44   int8_t* c0 = c;
45   int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
46   if XNN_UNPREDICTABLE(mr < 2) {
47     c1 = c0;
48   }
49   int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
50   if XNN_UNPREDICTABLE(mr <= 2) {
51     c2 = c1;
52   }
53   int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
54   if XNN_UNPREDICTABLE(mr != 4) {
55     c3 = c2;
56   }
57 
58   do {
59     int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
60     int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
61     int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
62     int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
63     int32x4_t vacc1x01 = vacc0x01;
64     int32x4_t vacc1x23 = vacc0x23;
65     int32x4_t vacc1x45 = vacc0x45;
66     int32x4_t vacc1x67 = vacc0x67;
67     int32x4_t vacc2x01 = vacc0x01;
68     int32x4_t vacc2x23 = vacc0x23;
69     int32x4_t vacc2x45 = vacc0x45;
70     int32x4_t vacc2x67 = vacc0x67;
71     int32x4_t vacc3x01 = vacc0x01;
72     int32x4_t vacc3x23 = vacc0x23;
73     int32x4_t vacc3x45 = vacc0x45;
74     int32x4_t vacc3x67 = vacc0x67;
75 
76     size_t p = ks;
77     do {
78       const int8_t* restrict a0 = a[0];
79       if XNN_UNPREDICTABLE(a0 != zero) {
80         a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
81       }
82       const int8_t* restrict a1 = a[1];
83       if XNN_UNPREDICTABLE(a1 != zero) {
84         a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
85       }
86       const int8_t* restrict a2 = a[2];
87       if XNN_UNPREDICTABLE(a2 != zero) {
88         a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
89       }
90       const int8_t* restrict a3 = a[3];
91       if XNN_UNPREDICTABLE(a3 != zero) {
92         a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
93       }
94       a += 4;
95 
96       size_t k = kc;
97 
98       while (k >= 16 * sizeof(int8_t)) {
99         const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
100         const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
101         const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
102         const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
103         const int32x2_t va10x0 = vld1_dup_s32((const void*)a1);
104         const int32x2_t va11x0 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8;
105         const int32x2_t va10x1 = vld1_dup_s32((const void*)a1);
106         const int32x2_t va11x1 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8;
107         const int32x2_t va20x0 = vld1_dup_s32((const void*)a2);
108         const int32x2_t va21x0 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8;
109         const int32x2_t va20x1 = vld1_dup_s32((const void*)a2);
110         const int32x2_t va21x1 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8;
111         const int32x2_t va30x0 = vld1_dup_s32((const void*)a3);
112         const int32x2_t va31x0 = vld1_dup_s32((const void*)(a3 + 4)); a3 += 8;
113         const int32x2_t va30x1 = vld1_dup_s32((const void*)a3);
114         const int32x2_t va31x1 = vld1_dup_s32((const void*)(a3 + 4)); a3 += 8;
115 
116         const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
117         const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
118         const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
119         const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
120         const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
121         const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
122         const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
123         const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
124 
125         const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
126         const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
127         const int8x8_t va1c0x0 = vreinterpret_s8_s32(va10x0);
128         const int8x8_t va1c0x1 = vreinterpret_s8_s32(va10x1);
129         const int8x8_t va2c0x0 = vreinterpret_s8_s32(va20x0);
130         const int8x8_t va2c0x1 = vreinterpret_s8_s32(va20x1);
131         const int8x8_t va3c0x0 = vreinterpret_s8_s32(va30x0);
132         const int8x8_t va3c0x1 = vreinterpret_s8_s32(va30x1);
133 
134         int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
135         int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1c0x0);
136         int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2c0x0);
137         int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3c0x0);
138         const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
139         vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
140         vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1c0x1);
141         vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2c0x1);
142         vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3c0x1);
143         vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
144         vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
145         vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
146         vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
147         int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
148         int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1c0x0);
149         int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2c0x0);
150         int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3c0x0);
151         const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
152         vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
153         vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1c0x1);
154         vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2c0x1);
155         vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3c0x1);
156         vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
157         vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
158         vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
159         vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
160         int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
161         int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1c0x0);
162         int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2c0x0);
163         int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3c0x0);
164         const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
165         vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
166         vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1c0x1);
167         vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2c0x1);
168         vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3c0x1);
169         vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
170         vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
171         vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
172         vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
173         int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
174         int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1c0x0);
175         int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2c0x0);
176         int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3c0x0);
177         const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
178         vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
179         vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1c0x1);
180         vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2c0x1);
181         vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3c0x1);
182         vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
183         vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
184         vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
185         vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
186         const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
187         const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
188         const int8x8_t va1c1x0 = vreinterpret_s8_s32(va11x0);
189         const int8x8_t va1c1x1 = vreinterpret_s8_s32(va11x1);
190         const int8x8_t va2c1x0 = vreinterpret_s8_s32(va21x0);
191         const int8x8_t va2c1x1 = vreinterpret_s8_s32(va21x1);
192         const int8x8_t va3c1x0 = vreinterpret_s8_s32(va31x0);
193         const int8x8_t va3c1x1 = vreinterpret_s8_s32(va31x1);
194 
195         int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
196         int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1c1x0);
197         int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2c1x0);
198         int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3c1x0);
199         const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
200         vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
201         vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1c1x1);
202         vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2c1x1);
203         vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3c1x1);
204         vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
205         vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
206         vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
207         vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
208         int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
209         int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1c1x0);
210         int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2c1x0);
211         int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3c1x0);
212         const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
213         vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
214         vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1c1x1);
215         vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2c1x1);
216         vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3c1x1);
217         vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
218         vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
219         vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
220         vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
221         int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
222         int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1c1x0);
223         int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2c1x0);
224         int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3c1x0);
225         const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
226         vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
227         vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1c1x1);
228         vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2c1x1);
229         vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3c1x1);
230         vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
231         vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
232         vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
233         vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
234         int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
235         int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1c1x0);
236         int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2c1x0);
237         int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3c1x0);
238         const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
239         vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
240         vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1c1x1);
241         vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2c1x1);
242         vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3c1x1);
243         vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
244         vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
245         vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
246         vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
247 
248         k -= 16 * sizeof(int8_t);
249       }
250 
251       if (k >= 8 * sizeof(int8_t)) {
252         const int32x2_t va00 = vld1_dup_s32((const void*)a0);
253         const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
254         const int32x2_t va10 = vld1_dup_s32((const void*)a1);
255         const int32x2_t va11 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8;
256         const int32x2_t va20 = vld1_dup_s32((const void*)a2);
257         const int32x2_t va21 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8;
258         const int32x2_t va30 = vld1_dup_s32((const void*)a3);
259         const int32x2_t va31 = vld1_dup_s32((const void*)(a3 + 4)); a3 += 8;
260 
261         const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
262         const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
263         const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
264         const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
265         const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
266         const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
267         const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
268         const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
269 
270         const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
271         const int8x8_t va1c0 = vreinterpret_s8_s32(va10);
272         const int8x8_t va2c0 = vreinterpret_s8_s32(va20);
273         const int8x8_t va3c0 = vreinterpret_s8_s32(va30);
274 
275         const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
276         const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
277         const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
278         const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
279         vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
280         vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
281         vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
282         vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
283         const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
284         const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
285         const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
286         const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
287         vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
288         vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
289         vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
290         vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
291         const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
292         const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
293         const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
294         const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
295         vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
296         vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
297         vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
298         vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
299         const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
300         const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
301         const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
302         const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
303         vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
304         vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
305         vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
306         vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
307         const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
308         const int8x8_t va1c1 = vreinterpret_s8_s32(va11);
309         const int8x8_t va2c1 = vreinterpret_s8_s32(va21);
310         const int8x8_t va3c1 = vreinterpret_s8_s32(va31);
311 
312         const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
313         const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
314         const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
315         const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
316         vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
317         vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
318         vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
319         vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
320         const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
321         const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
322         const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
323         const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
324         vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
325         vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
326         vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
327         vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
328         const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
329         const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
330         const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
331         const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
332         vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
333         vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
334         vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
335         vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
336         const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
337         const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
338         const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
339         const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
340         vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
341         vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
342         vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
343         vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);
344 
345         k -= 8 * sizeof(int8_t);
346       }
347 
348       if XNN_UNLIKELY(k != 0) {
349         const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
350         const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
351         const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
352         const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
353 
354         const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
355         const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
356         const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
357         const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
358 
359         const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
360         const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
361         vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
362         const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
363         vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
364         const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
365         vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
366         const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
367         vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
368         const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
369         const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
370         vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
371         const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
372         vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
373         const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
374         vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
375         const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
376         vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
377         const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
378         const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
379         vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
380         const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
381         vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
382         const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
383         vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
384         const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
385         vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
386         const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
387         const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
388         vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
389         const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
390         vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
391         const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
392         vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
393         const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
394         vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
395       }
396       p -= 4 * sizeof(void*);
397     } while (p != 0);
398 
399 #if XNN_ARCH_ARM64
400     int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
401     int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
402     int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
403     int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
404     int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
405     int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
406     int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
407     int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
408 #else
409     const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
410     const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
411     int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
412     const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
413     const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
414     int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
415     const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
416     const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
417     int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
418     const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
419     const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
420     int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
421     const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
422     const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
423     int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
424     const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
425     const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
426     int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
427     const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
428     const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
429     int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
430     const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
431     const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
432     int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
433 #endif
434 
435     const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
436     const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
437     const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
438 
439     vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
440     vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
441     vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
442     vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
443     vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
444     vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
445     vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
446     vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);
447 
448     vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
449     vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
450     vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
451     vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
452     vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
453     vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
454     vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
455     vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
456 
457     vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
458     vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
459     vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
460     vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
461     vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
462     vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
463     vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
464     vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
465 
466     const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
467 #if XNN_ARCH_ARM64
468     int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
469     int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
470     int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
471     int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);
472 
473     vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
474     vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
475     vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
476     vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
477 
478     int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
479     int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
480 #else
481     int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
482     int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
483     int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
484     int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));
485 
486     vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
487     vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
488     vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
489     vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);
490 
491     int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
492     int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
493 #endif
494 
495     const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
496     vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
497     vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
498 
499     const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
500     vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
501     vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
502 
503     if (nc >= 8) {
504       vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
505       vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
506       vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
507       vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
508 
509       c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
510       c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
511       c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
512       c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
513 
514       a = (const int8_t**restrict) ((uintptr_t) a - ks);
515 
516       nc -= 8;
517     } else {
518       if (nc & 4) {
519         vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
520         vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
521         vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
522         vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
523         vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
524         vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
525       }
526       if (nc & 2) {
527         vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
528         vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
529         vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
530         vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
531         vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
532         vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
533       }
534       if (nc & 1) {
535         vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
536         vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
537         vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
538         vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
539       }
540 
541       nc = 0;
542     }
543   } while (nc != 0);
544 }
545