// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/neon-mlal-lane.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_qc8_gemm_minmax_fp32_ukernel_4x8__neonv8_mlal_lane_prfm(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

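  // Set up per-row pointers into A and C. When mr < 4, pointers for the
  // missing rows alias the previous row, so the kernel always computes a full
  // 4-row tile; the duplicated rows redo the same work into the same memory.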
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

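  // Outer loop over the N dimension in tiles of 8 columns. Each tile of the
  // packed weights w begins with 8 int32 accumulator initializers (biases),
  // which seed all four rows.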
  do {
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

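    // Main loop over K in steps of 8 bytes: widen 8 int8 inputs per row to
    // int16, then accumulate one 8-wide weight column per input lane with
    // vmlal_lane_s16.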
    size_t k = kc;
    while (k >= 8 * sizeof(int8_t)) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int16x8_t vxa0 = vmovl_s8(va0);
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int16x8_t vxa1 = vmovl_s8(va1);
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int16x8_t vxa2 = vmovl_s8(va2);
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;
      const int16x8_t vxa3 = vmovl_s8(va3);

      const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
      const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
      const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
      const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);

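      // Prefetch the packed weights 448 bytes ahead (the "prfm" variant).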
      __builtin_prefetch((const int8_t*) w + 448);

      const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
      const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
      const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
      const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa3), 3);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa3), 3);

      k -= 8 * sizeof(int8_t);
    }
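    // Remainder: 1-7 leftover bytes of K. The loads still read 8 bytes, which
    // XNN_OOB_READS permits; only the first k input lanes are accumulated.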
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int16x8_t vxa0 = vmovl_s8(va0);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int16x8_t vxa1 = vmovl_s8(va1);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const int16x8_t vxa2 = vmovl_s8(va2);
      const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);
      const int16x8_t vxa3 = vmovl_s8(va3);

      const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
      const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

      vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
      vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
      vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
      vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
      vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
      vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
      vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa3), 0);
      vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa3), 0);

      if (k >= 2 * sizeof(int8_t)) {
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa3), 1);
        vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa3), 1);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

          vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
          vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
          vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
          vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
          vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
          vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
          vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa3), 2);
          vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa3), 2);

          if (k >= 4 * sizeof(int8_t)) {
            const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

            vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
            vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
            vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
            vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
            vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
            vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
            vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa3), 3);
            vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa3), 3);

            if (k > 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

              vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
              vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
              vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
              vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
              vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
              vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
              vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa3), 0);
              vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa3), 0);

              if (k >= 6 * sizeof(int8_t)) {
                const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

                vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
                vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
                vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa3), 1);
                vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa3), 1);

                if (k > 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

                  vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                  vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                  vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                  vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                  vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                  vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                  vacc3x0123 = vmlal_lane_s16(vacc3x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
                  vacc3x4567 = vmlal_lane_s16(vacc3x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa3), 2);
                }
              }
            }
          }
        }
      }
    }

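    // Requantization: convert the int32 accumulators to float and multiply by
    // the 8 per-channel scales (qc8) packed in w after the weights.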
    float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
    float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
    float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
    float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
    float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123);
    float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567);
    float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123);
    float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567);

    const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
    vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
    vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
    vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale0123);
    vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale0123);
    const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
    vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
    vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
    vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale4567);
    vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale4567);

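    // Convert back to int32, rounding to nearest-even (FCVTNS); this ARMv8
    // instruction is what distinguishes the "neonv8" variant.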
    vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
    vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
    vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
    vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123);
    vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567);
    vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123);
    vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567);

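    // Saturate-narrow int32 -> int16, add the output zero point with
    // saturation, then saturate-narrow to int8 (AArch64 fuses the combine
    // with vqmovn_high).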
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

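    // Clamp the outputs to [output_min, output_max].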
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

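    // Store a full 8-column tile per row when nc >= 8 and rewind the A
    // pointers by kc for the next tile; otherwise store the 4/2/1-byte tail,
    // shifting the vectors down with vextq_s8 between stores.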
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}