// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

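// QS8 IGEMM microkernel: computes a 3-row by 16-column tile of int8 outputs
// from indirection pointers `a`, packed weights `w` (16 int32 biases followed
// by int8 weights packed 2 input channels at a time, the "c2" layout), and
// rndnu requantization parameters. Rows beyond `mr` reuse the previous row's
// output pointer so their stores are harmless duplicates.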
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x16c2__neon_mull_ld1r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

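  // Each iteration of the outer loop produces a 3x16 tile of output columns.
  // The packed weights start with 16 int32 bias values, which seed the
  // accumulators for row 0 and are copied to rows 1 and 2.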
53 do {
54 int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
55 int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
56 int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
57 int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
58 int32x4_t vacc1x0123 = vacc0x0123;
59 int32x4_t vacc1x4567 = vacc0x4567;
60 int32x4_t vacc1x89AB = vacc0x89AB;
61 int32x4_t vacc1xCDEF = vacc0xCDEF;
62 int32x4_t vacc2x0123 = vacc0x0123;
63 int32x4_t vacc2x4567 = vacc0x4567;
64 int32x4_t vacc2x89AB = vacc0x89AB;
65 int32x4_t vacc2xCDEF = vacc0xCDEF;
66
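    // Walk the indirection buffer, 3 row pointers at a time. Pointers equal to
    // `zero` are left pointing at the shared zero buffer (used for padding)
    // and are not adjusted by a_offset.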
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;

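      // Main loop: consume 8 bytes of each row per iteration. Each pair of
      // adjacent input bytes (2 channels) is broadcast across a vector with a
      // 16-bit dup load (the "ld1r" in the kernel name), reinterpreted as
      // int8, and multiplied against 8 packed weights with VMULL.S8; the
      // 16-bit products are pairwise-accumulated into the int32 accumulators
      // with VPADAL.S16.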
      while (k >= 8 * sizeof(int8_t)) {
        const int16x4_t va00 = vld1_dup_s16((const void*)a0);
        const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
        const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
        const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
        const int16x4_t va10 = vld1_dup_s16((const void*)a1);
        const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
        const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
        const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
        const int16x4_t va20 = vld1_dup_s16((const void*)a2);
        const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2));
        const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4));
        const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8;

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
        const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
        const int8x8_t va2c0 = vreinterpret_s8_s16(va20);

        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);
        const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
        const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
        const int8x8_t va2c1 = vreinterpret_s8_s16(va21);

        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
        const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
        const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
        const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
        const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
        const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);
        const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
        const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
        const int8x8_t va2c2 = vreinterpret_s8_s16(va22);

        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
        const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
        const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
        const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
        const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
        const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
        const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
        const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
        const int8x8_t va2c3 = vreinterpret_s8_s16(va23);

        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
        const int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3, va1c3);
        const int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3, va2c3);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3);
        const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
        const int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3, va1c3);
        const int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3, va2c3);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3);

        k -= 8 * sizeof(int8_t);
      }

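      // Remainder: kc was rounded up to a multiple of 2, so 2, 4, or 6 bytes
      // of each row may remain. Up to 8 bytes are loaded (the kernel is
      // declared XNN_OOB_READS) and one 2-byte channel pair is processed per
      // step, each broadcast from a lane of the remainder vector.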
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
        vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
        const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
        vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        const int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0, va1c0);
        vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0);
        const int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0, va1c0);
        vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0);
        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        const int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0, va2c0);
        vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0);
        const int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0, va2c0);
        vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
          vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
          const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
          vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
          const int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1, va1c1);
          vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1);
          const int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1, va1c1);
          vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1);
          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
          const int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1, va2c1);
          vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1);
          const int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1, va2c1);
          vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1);

          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
            vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
            const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
            vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
            const int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2, va1c2);
            vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2);
            const int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2, va1c2);
            vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2);
            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
            const int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2, va2c2);
            vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2);
            const int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2, va2c2);
            vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2);
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

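    // Requantize with the rndnu scheme: saturating pre-shift, saturating
    // doubling multiply returning the high half (vqdmulhq_s32), then a
    // rounding post-shift.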
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vqshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vqshlq_s32(vacc2xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);

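    // Narrow int32 -> int16 with saturation, add the output zero point, then
    // narrow int16 -> int8 with saturation. The AArch64 path uses the
    // vqmovn_high forms; the AArch32 path combines two 64-bit halves instead.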
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc1x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc2x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc1x89ABCDEF = vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc2x89ABCDEF = vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
#endif

    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);

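    // Store the 3x16 tile. With a full tile (nc >= 16), write 16 bytes per
    // row, advance the column pointers by cn_stride, and rewind the
    // indirection pointer array by ks for the next column block. Otherwise
    // handle the nc tail with 8/4/2/1-byte stores.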
    if (nc >= 16) {
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      int8x8_t vout2x01234567 = vget_low_s8(vout2x0123456789ABCDEF);
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c2, vout2x01234567); c2 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vout2x01234567 = vget_high_s8(vout2x0123456789ABCDEF);
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}