// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 GEMM microkernel producing an MRxNR = 3x16 output tile.
// Layout: C2S4 -- weights are packed 2 bytes (K elements) per column group,
// and the 8-byte activation vectors are rotated by 2 bytes (vext) between the
// 4 column-group passes (c0..c3) so each vmull_s8 pairs the right K elements.
// Uses the MLAL variant: the main loop consumes 16 bytes of K per iteration,
// widening-multiplying two 8-byte halves (x0/x1) and fusing them with
// vmlal_s8 before the pairwise widening accumulate (vpadalq_s16).
// Requantization is the "rndnu" scheme: saturating pre-shift, doubling
// high-half multiply, rounding post-shift, then narrow + clamp to int8.
void xnn_qs8_gemm_minmax_rndnu_ukernel_3x16c2s4__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointer setup: when mr < 3, alias the out-of-range rows onto the last
  // valid row so the kernel computes (and discards into duplicate stores)
  // without branching in the hot loop.
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  // Packed activations are padded to a multiple of 8 bytes of K.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Accumulators start from the packed per-channel biases stored at the
    // head of the weight blob; rows 1-2 copy row 0's bias-initialized values.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc1x89AB = vacc0x89AB;
    int32x4_t vacc1xCDEF = vacc0xCDEF;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc2x89AB = vacc0x89AB;
    int32x4_t vacc2xCDEF = vacc0xCDEF;

    // Main loop: process K in chunks of 16 bytes (two 8-byte halves per row).
    size_t k = kc;
    while (k >= 16 * sizeof(int8_t)) {
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
      int8x8_t va2x1 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

      // Column-group c0: multiply x0 halves, fuse x1 halves via vmlal_s8,
      // then pairwise-widen into the 32-bit accumulators.
      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
      const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
      vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1);
      vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
      const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
      vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1);
      vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1x0);
      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2x0);
      const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0x1);
      vprod1x89ABc0 = vmlal_s8(vprod1x89ABc0, vb89ABc0x1, va1x1);
      vprod2x89ABc0 = vmlal_s8(vprod2x89ABc0, vb89ABc0x1, va2x1);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1x0);
      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2x0);
      const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0x1);
      vprod1xCDEFc0 = vmlal_s8(vprod1xCDEFc0, vbCDEFc0x1, va1x1);
      vprod2xCDEFc0 = vmlal_s8(vprod2xCDEFc0, vbCDEFc0x1, va2x1);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
      // Rotate activations by 2 bytes for the next column group (the "s4"
      // shuffle of the c2s4 layout).
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
      const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
      vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1);
      vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
      const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
      vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1);
      vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1x0);
      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2x0);
      const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0x1);
      vprod1x89ABc1 = vmlal_s8(vprod1x89ABc1, vb89ABc1x1, va1x1);
      vprod2x89ABc1 = vmlal_s8(vprod2x89ABc1, vb89ABc1x1, va2x1);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1x0);
      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2x0);
      const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0x1);
      vprod1xCDEFc1 = vmlal_s8(vprod1xCDEFc1, vbCDEFc1x1, va1x1);
      vprod2xCDEFc1 = vmlal_s8(vprod2xCDEFc1, vbCDEFc1x1, va2x1);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
      const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
      vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1);
      vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
      const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
      vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1);
      vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1x0);
      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2x0);
      const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0x1);
      vprod1x89ABc2 = vmlal_s8(vprod1x89ABc2, vb89ABc2x1, va1x1);
      vprod2x89ABc2 = vmlal_s8(vprod2x89ABc2, vb89ABc2x1, va2x1);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1x0);
      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2x0);
      const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0x1);
      vprod1xCDEFc2 = vmlal_s8(vprod1xCDEFc2, vbCDEFc2x1, va1x1);
      vprod2xCDEFc2 = vmlal_s8(vprod2xCDEFc2, vbCDEFc2x1, va2x1);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va0x1 = vext_s8(va0x1, va0x1, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va1x1 = vext_s8(va1x1, va1x1, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      va2x1 = vext_s8(va2x1, va2x1, 2);
      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
      const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
      vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1);
      vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2x1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
      const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
      vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1);
      vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2x1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1x0);
      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2x0);
      const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0x1);
      vprod1x89ABc3 = vmlal_s8(vprod1x89ABc3, vb89ABc3x1, va1x1);
      vprod2x89ABc3 = vmlal_s8(vprod2x89ABc3, vb89ABc3x1, va2x1);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1x0);
      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2x0);
      const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
      vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0x1);
      vprod1xCDEFc3 = vmlal_s8(vprod1xCDEFc3, vbCDEFc3x1, va1x1);
      vprod2xCDEFc3 = vmlal_s8(vprod2xCDEFc3, vbCDEFc3x1, va2x1);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);

      k -= 16 * sizeof(int8_t);
    }
    // Remainder: at most one 8-byte chunk of K (kc is rounded up to 8),
    // so only the x0 halves are processed -- no MLAL fusion.
    if (k != 0) {
      int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
      int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
      int8x8_t va2x0 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
      const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

      int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
      int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
      int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
      int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
      int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
      int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1x0);
      int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2x0);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
      int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
      int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1x0);
      int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2x0);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
      int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
      int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
      int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
      int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
      int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1x0);
      int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2x0);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
      int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
      int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1x0);
      int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2x0);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
      int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
      int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
      int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
      int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
      int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1x0);
      int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2x0);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
      int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
      int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1x0);
      int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2x0);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
      va0x0 = vext_s8(va0x0, va0x0, 2);
      va1x0 = vext_s8(va1x0, va1x0, 2);
      va2x0 = vext_s8(va2x0, va2x0, 2);
      int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
      int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
      int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
      int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
      int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
      int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1x0);
      int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2x0);
      vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
      vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
      vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
      int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
      int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1x0);
      int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2x0);
      vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
      vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
      vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);

    }

    // rndnu requantization: saturating pre-shift, saturating doubling
    // high-half multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vqshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vqshlq_s32(vacc2xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);

    // Narrow 32 -> 16 bits (saturating), add the output zero point, then
    // narrow 16 -> 8 bits (saturating). AArch64 uses the *_high forms.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc1x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc2x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc1x89ABCDEF = vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc2x89ABCDEF = vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store; rewind A pointers for the next column block.
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 16;
    } else {
      // Final case where not all of the 16 columns fit in the destination:
      // store 8/4/2/1 elements per row by bit-testing nc, shifting the
      // remaining lanes down with vext after each partial store.
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
      if (nc & 8) {
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c2, vout2x01234567); c2 += 8;
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1_lane_s8(c2, vout2x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}