// Auto-generated file. Do not edit!
// Template: src/qu8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

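// QU8 (unsigned 8-bit) GEMM microkernel computing an 8x8 output tile, with
// weights packed in groups of 4 along the K dimension ("c4") and the
// multiply-accumulate work done by ARMv8.2 dot-product (UDOT) intrinsics.
// Requires the NEON DOT extension; XNN_OOB_READS below declares that the
// kernel may read a few bytes past the ends of its inputs.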

void xnn_qu8_gemm_minmax_rndnu_ukernel_8x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
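  // Round kc up to a multiple of 4: both activations and packed weights are
  // consumed in 4-byte groups by the dot-product instructions. The weight
  // packing routine is assumed to pad the K dimension the same way.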
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    a5 = a4;
    c5 = c4;
  }
  const uint8_t* a6 = (const uint8_t*) ((uintptr_t) a5 + a_stride);
  uint8_t* c6 = (uint8_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    a6 = a5;
    c6 = c5;
  }
  const uint8_t* a7 = (const uint8_t*) ((uintptr_t) a6 + a_stride);
  uint8_t* c7 = (uint8_t*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    a7 = a6;
    c7 = c6;
  }
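  // Rows beyond mr alias the previous row's pointers: out-of-range rows then
  // recompute the previous row and store to the same addresses, which keeps
  // the main loops branch-free for partial tiles.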

  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
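  // Broadcast the kernel (weight) zero point across all 8 lanes. Dotting it
  // with the activations below accumulates zp_kernel * sum(a), the correction
  // term for the unsigned weights; the activation zero-point terms are
  // presumably folded into the packed bias during weight packing.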

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x4_t vpacc5x0123 = vpacc0x0123;
    uint32x4_t vpacc5x4567 = vpacc0x4567;
    uint32x4_t vpacc6x0123 = vpacc0x0123;
    uint32x4_t vpacc6x4567 = vpacc0x4567;
    uint32x4_t vpacc7x0123 = vpacc0x0123;
    uint32x4_t vpacc7x4567 = vpacc0x4567;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);
    uint32x2_t vnacc5 = vmov_n_u32(0);
    uint32x2_t vnacc6 = vmov_n_u32(0);
    uint32x2_t vnacc7 = vmov_n_u32(0);

    // Inner accumulation loop along the 8 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load an 8x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
      const uint8x8_t va6x01234567 = vld1_u8(a6); a6 += 8;
      const uint8x8_t va7x01234567 = vld1_u8(a7); a7 += 8;

      // Load an 8x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

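      // vbKKKKxNNNN holds k-values KKKK for columns NNNN: each 16-byte vector
      // packs 4 columns x 4 consecutive k-values. vdotq_lane_u32 then dots
      // each 4-byte column group against one 4-byte lane of the activation
      // vector, accumulating 4 columns of one row per instruction.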
      // Multiply-accumulate: 8x8 * 8x8 --> 8x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
      vnacc6 = vdot_u32(vnacc6, va_zero_point, va6x01234567);
      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb4567x0123, va6x01234567, 1);
      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb4567x4567, va6x01234567, 1);
      vnacc7 = vdot_u32(vnacc7, va_zero_point, va7x01234567);
      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb4567x0123, va7x01234567, 1);
      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb4567x4567, va7x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
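      // kc was rounded up to a multiple of 4, so the remainder here is exactly
      // 4 bytes per row. Each 4-byte group is loaded into lane 0 of a zeroed
      // vector, and only lane-0 dot products are issued below. The 32-bit lane
      // load may read past a row's true end when the caller's kc was not a
      // multiple of 4; this is what XNN_OOB_READS declares.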
      // Load an 8x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
      const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
      const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
      const uint8x8_t va5x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a5, vmov_n_u32(0), 0)); a5 += 4;
      const uint8x8_t va6x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a6, vmov_n_u32(0), 0)); a6 += 4;
      const uint8x8_t va7x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a7, vmov_n_u32(0), 0)); a7 += 4;

      // Load a 4x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
      vnacc6 = vdot_u32(vnacc6, va_zero_point, va6x01234567);
      vpacc6x0123 = vdotq_lane_u32(vpacc6x0123, vb0123x0123, va6x01234567, 0);
      vpacc6x4567 = vdotq_lane_u32(vpacc6x4567, vb0123x4567, va6x01234567, 0);
      vnacc7 = vdot_u32(vnacc7, va_zero_point, va7x01234567);
      vpacc7x0123 = vdotq_lane_u32(vpacc7x0123, vb0123x0123, va7x01234567, 0);
      vpacc7x4567 = vdotq_lane_u32(vpacc7x4567, vb0123x4567, va7x01234567, 0);
    }

    // Subtract zero point from accumulators.
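    // vpadd folds the two halves of each vnacc into a single
    // zp_kernel * sum(a[row]) value, broadcast to all 4 lanes, so that
    // acc = sum(a*b) - zp_kernel*sum(a) = sum(a * (b - zp_kernel)).
    // The result is reinterpreted as signed for requantization.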
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
    vnacc5 = vpadd_u32(vnacc5, vnacc5);
    const uint32x4_t vnacc5x0123 = vcombine_u32(vnacc5, vnacc5);
    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x0123));
    vnacc6 = vpadd_u32(vnacc6, vnacc6);
    const uint32x4_t vnacc6x0123 = vcombine_u32(vnacc6, vnacc6);
    int32x4_t vacc6x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x0123, vnacc6x0123));
    int32x4_t vacc6x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc6x4567, vnacc6x0123));
    vnacc7 = vpadd_u32(vnacc7, vnacc7);
    const uint32x4_t vnacc7x0123 = vcombine_u32(vnacc7, vnacc7);
    int32x4_t vacc7x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x0123, vnacc7x0123));
    int32x4_t vacc7x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc7x4567, vnacc7x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
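    // "rndnu" requantization: vshlq_s32 with the (negative) pre-shift is a
    // truncating arithmetic shift right, the saturating doubling high multiply
    // computes (x * multiplier) >> 31, and vrshlq_s32 with the (negative)
    // post-shift applies a rounding shift right. Net effect, with positive
    // shift magnitudes: y ~= x * multiplier * 2^-(31 + pre_shift + post_shift).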

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
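    // Saturating-narrow 32 -> 16 bits, add the output zero point, then
    // saturating-narrow 16 -> unsigned 8 bits. On AArch64 the _high variants
    // narrow directly into the upper half of a 128-bit register; elsewhere
    // the halves are narrowed separately and combined with vcombine.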
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
    uint8x16_t vout6x01234567_7x01234567 = vqmovun_high_s16(vqmovun_s16(vacc6x01234567), vacc7x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
    uint8x16_t vout6x01234567_7x01234567 = vcombine_u8(vqmovun_s16(vacc6x01234567), vqmovun_s16(vacc7x01234567));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
    vout6x01234567_7x01234567 = vmaxq_u8(vout6x01234567_7x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
    vout6x01234567_7x01234567 = vminq_u8(vout6x01234567_7x01234567, voutput_max);

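    // Store the 8x8 tile. With nc >= 8 each row stores one 8-byte vector half,
    // the c pointers advance to the next column group, and the a pointers are
    // rewound by kc for the next pass. Otherwise the remaining 1-7 columns are
    // written in 4-, 2- and 1-byte pieces, rotating the vectors with vextq_u8
    // after each partial store.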
    if (nc >= 8) {
      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
      vst1_u8(c6 + 0, vget_low_u8(vout6x01234567_7x01234567));
      vst1_u8(c7 + 0, vget_high_u8(vout6x01234567_7x01234567));

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
      c6 = (uint8_t*) ((uintptr_t) c6 + cn_stride);
      c7 = (uint8_t*) ((uintptr_t) c7 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);
      a6 = (const uint8_t*) ((uintptr_t) a6 - kc);
      a7 = (const uint8_t*) ((uintptr_t) a7 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32((void*) c6, vreinterpretq_u32_u8(vout6x01234567_7x01234567), 0); c6 += 4;
        vst1q_lane_u32((void*) c7, vreinterpretq_u32_u8(vout6x01234567_7x01234567), 2); c7 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16((void*) c6, vreinterpretq_u16_u8(vout6x01234567_7x01234567), 0); c6 += 2;
        vst1q_lane_u16((void*) c7, vreinterpretq_u16_u8(vout6x01234567_7x01234567), 4); c7 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout6x01234567_7x01234567 = vextq_u8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_u8(c6, vout6x01234567_7x01234567, 0);
        vst1q_lane_u8(c7, vout6x01234567_7x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}