// Auto-generated file. Do not edit!
//   Template: src/qu8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_rndnu_ukernel_6x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
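  // Each UDOT consumes 4 bytes along K, so KC is rounded up to a multiple of
  // 4; the tail loads may then read up to 3 bytes past the end of A, which is
  // why the kernel is annotated XNN_OOB_READS.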
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
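  // For mr < 6, clamp the out-of-range row pointers to the previous row: the
  // extra rows are then computed redundantly and stored over the same memory,
  // which keeps the hot loop free of per-row branches.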
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const uint8_t* a5 = (const uint8_t*) ((uintptr_t) a4 + a_stride);
  uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
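  // The kernel (weight) zero point is broadcast once. Inside the loops, the
  // vnacc accumulators gather kernel_zero_point * sum(a) for each row, which
  // is later subtracted from the raw dot products:
  //   acc = sum(a * b) - kernel_zero_point * sum(a).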

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x4_t vpacc5x0123 = vpacc0x0123;
    uint32x4_t vpacc5x4567 = vpacc0x4567;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);
    uint32x2_t vnacc5 = vmov_n_u32(0);

    // Inner accumulation loop along the KC (reduction) dimension.
    size_t k = kc;
    // The loop is partially unrolled 2x to load 8 bytes (two groups of 4) at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 6x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
      const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;

      // Load an 8x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 6x8 * 8x8 --> 6x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle the remaining bytes of `k` (KC is a multiple of 4, so the tail, when nonzero, is exactly 4 bytes).
    if XNN_UNLIKELY(k != 0) {
      // Load a 6x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
      const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
      const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
      const uint8x8_t va5x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a5, vmov_n_u32(0), 0)); a5 += 4;

      // Load a 4x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 6x4 * 4x8 --> 6x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
      vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
      vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
    }

    // Reduce each row's zero-point term and subtract it from the partial
    // accumulators: acc = sum(a * b) - kernel_zero_point * sum(a).
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
    vnacc5 = vpadd_u32(vnacc5, vnacc5);
    const uint32x4_t vnacc5x0123 = vcombine_u32(vnacc5, vnacc5);
    int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
    int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x0123));

    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

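    // Requantize with the rndnu scheme: an arithmetic right pre-shift (vshlq
    // with a negative shift count), a saturating doubling multiply returning
    // the high half, and a rounding right post-shift back to the output scale.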
    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);

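    // Narrow to int16 with signed saturation and add the output zero point,
    // then narrow to uint8 with unsigned saturation. On AArch64 the *_high
    // forms pack both halves without extra register moves.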
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
    uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
    uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);

    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);

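    // Full-width store: write 8 outputs per row, advance the column pointers
    // by cn_stride, and rewind the activation pointers by KC so the same rows
    // feed the next group of 8 columns.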
    if (nc >= 8) {
      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
      vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
      vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
      c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);
      a5 = (const uint8_t*) ((uintptr_t) a5 - kc);

      nc -= 8;
    } else {
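      // Remainder columns: each vector packs two rows (low half = even row,
      // high half = odd row). Store 4-, 2-, then 1-byte pieces with lane
      // stores, rotating with vextq_u8 after each partial store so the next
      // piece sits at the front of each half.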
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}