// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

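// QS8 GEMM micro-kernel: computes a 3-row by 8-column tile of the output from
// signed 8-bit inputs, using c2-packed weights, NEON vmull_s8 multiply-accumulate,
// and rndnu requantization.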
void xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

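  // Round kc up to a multiple of 2 bytes (the c2 packing granularity), then set up
  // per-row input (A) and output (C) pointers, clamping rows that exceed mr.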
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

  do {
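    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.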
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t k = kc;

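    // Main K loop: consume 8 bytes of each A row per iteration. Each pair of A bytes
    // (c0..c3) is duplicated across a vector and multiply-accumulated against two
    // groups of 4 packed B columns via vmull_s8 + vpadalq_s16.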
    while (k >= 8 * sizeof(int8_t)) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));

      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
      const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
      const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
      const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));

      const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
      const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
      const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
      const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
      const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
      const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
      const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
      const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
      const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));

      const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
      const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
      const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
      const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
      const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
      const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
      const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
      const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3));
      const int8x8_t va2c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3));

      const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
      const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
      const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
      const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
      const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
      const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);

      k -= 8 * sizeof(int8_t);
    }

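    // Remainder: handle the final 2, 4, or 6 bytes of K (kc was padded to a multiple of 2).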
    if XNN_UNLIKELY(k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

      const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

      const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
      const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
      vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
      const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
      vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
      const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
      const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
      vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
      const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
      vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
      const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
      const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
      vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
      const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
      vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);

      if (k > 2 * sizeof(int8_t)) {
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);

        if (k > 4 * sizeof(int8_t)) {
          const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
          const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
          const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
          const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
          const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
          const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
          const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
          const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
          const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        }
      }
    }

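    // Requantize: saturating shift by the pre-shift amount, doubling high multiply by the
    // fixed-point multiplier, then rounding shift by the post-shift amount (rndnu scheme).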
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

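    // Narrow the 32-bit accumulators to 16 bits, add the output zero point,
    // then pack to 8 bits with saturation.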
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

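    // Clamp the 8-bit results to the requested [output_min, output_max] range.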
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

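    // Store a full 3x8 output tile, or fall through to write the remaining 1-7 columns.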
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vout2x01234567);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1_lane_s8(c2, vout2x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}