// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2s4__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

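  // Compute per-row output pointers; rows beyond mr alias the previous valid row, so their duplicate stores are harmless.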
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

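  // Round kc up to a multiple of 8 bytes so the inner loop always consumes full 8-byte strips.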
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
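    // Initialize all 4x8 int32 accumulators from the bias values packed at the head of w.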
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

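    // Walk the indirection buffer: each step fetches 4 row pointers and accumulates one kc-long slice.
    // Pointers equal to `zero` are not offset, so padding taps read from the zero buffer.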
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

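      // Main K loop (c2s4 shuffle): load 8 activation bytes per row, multiply them against four
      // pairs of weight vectors, and rotate the activations by 2 bytes between pairs so each
      // 2-byte group lines up with the weight columns packed for that position.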
      size_t k = kc;
      do {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 4 * sizeof(void*);
    } while (p != 0);

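    // Requantize (rndnu scheme): saturating pre-shift, saturating doubling-high multiply, then rounding post-shift.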
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

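    // Narrow to int16 with saturation, add the output zero point, then narrow to int8.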
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

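    // Clamp the int8 results to the requested [output_min, output_max] range.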
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

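    // Store a full 8-column tile per row, or fall back to 4/2/1-column remainder stores.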
    if (nc >= 8) {
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}