// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

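// 4x8c8 QS8 GEMM microkernel: computes a 4-row by 8-column tile of the int8
// output from an int8 LHS (a) and packed weights (w). The packed-weight
// stream interleaves 8 per-channel int32 biases with 8-byte groups of int8
// weights, one group per output column per K step. Accumulation is int32;
// results are requantized with the rndnu scheme and clamped before storing.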
void xnn_qs8_gemm_minmax_rndnu_ukernel_4x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

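  // kc is padded up to the kernel's 8-byte K unroll; the packed weights are
  // padded the same way, so whole 8-byte groups can always be read. Rows past
  // mr alias the previous row's pointers: they redundantly compute and store
  // the same values, which keeps the loops free of per-row bounds checks.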
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

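  // Each output column keeps a full 128-bit accumulator per row, seeded with
  // that column's int32 bias from the packed-weight stream. The 4 lanes hold
  // partial dot products that are reduced to a single sum after the K loop.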
  do {
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t k = kc;

    // Handle 8 bytes of K at a time: vmull_s8 widens each int8 product to
    // int16, and vpadalq_s16 pairwise-accumulates the products into int32 lanes.
    while (k != 0) {
      const int8x8_t va0 = vld1_s8(a0); a0 += 8;
      const int8x8_t va1 = vld1_s8(a1); a1 += 8;
      const int8x8_t va2 = vld1_s8(a2); a2 += 8;
      const int8x8_t va3 = vld1_s8(a3); a3 += 8;

      const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
      const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
      const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
      const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
      vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
      vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
      vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
      vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
      const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
      const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
      const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
      const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
      vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
      vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
      vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
      vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
      const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
      const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
      const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
      const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
      vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
      vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
      vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
      vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
      const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
      const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
      const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
      const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
      vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
      vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
      vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
      vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
      const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
      const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
      const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
      const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
      vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
      vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
      vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
      vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
      const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
      const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
      const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
      const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
      vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
      vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
      vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
      vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
      const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
      const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
      const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
      const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
      vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
      vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
      vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
      vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
      const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
      const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
      const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
      const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
      const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
      vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
      vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
      vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
      vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

      k -= 8 * sizeof(int8_t);
    }

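    // Reduce each column's 4 partial sums to one lane, gathering columns 0-3
    // and 4-7 into one vector per row. AArch64 has a full-width pairwise add
    // (vpaddq_s32); ARMv7 emulates it with vadd_s32 + vpadd_s32 on halves.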
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);

    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

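    // rndnu requantization: a saturating shift by the pre-shift (vqshlq_s32
    // shifts right for negative shift counts), a doubling multiply-high by the
    // fixed-point multiplier (vqdmulhq_s32), then a rounding shift by the
    // post-shift brings the int32 accumulators into the output scale.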
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

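    // Narrow to int16 with saturation, add the output zero point (also
    // saturating), then narrow to int8. AArch64 uses the *_high variants to
    // pack both halves of a vector without extra register moves.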
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

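    // Clamp the int8 results to the operator's output range.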
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

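    // Full-tile path: store 8 bytes per row, step the output pointers to the
    // next column block, and rewind the A pointers by kc for the next tile.
    // The tail path stores 4-, 2-, then 1-byte pieces, rotating the vectors
    // with vextq_s8 after each partial store so the next piece is in lane 0.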
    if (nc >= 8) {
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 8;
    } else {
      // Final case where not all of the 8 columns fit in the destination.
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}