// Auto-generated file. Do not edit!
//   Template: src/qu8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


void xnn_qu8_gemm_minmax_rndnu_ukernel_5x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

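  // Each UDOT consumes 4 bytes along K, so KC is rounded up to a multiple
  // of 4 to match the packed-weight layout; the XNN_OOB_READS annotation
  // above covers the resulting over-read of the activation rows.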
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
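  // One activation (a) and one output (c) pointer per row of the 5-row tile.
  // When mr < 5, the pointers for the missing rows are clamped to the
  // previous row, so their loads and stores stay in bounds and the redundant
  // results are simply never observed.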
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const uint8_t* a3 = (const uint8_t*) ((uintptr_t) a2 + a_stride);
  uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const uint8_t* a4 = (const uint8_t*) ((uintptr_t) a3 + a_stride);
  uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

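  // Broadcast the kernel zero point. Rather than subtracting it from every
  // weight byte, the kernel accumulates dot(zero_point, activations) into
  // separate vnacc counters and subtracts that correction once per row
  // after the K loop.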
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  // Loop over groups of 8 columns.
  do {
    // Initialize accumulators with bias. 8 bias values are loaded from the
    // weight matrix, at the start of the group of 8 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc3x0123 = vpacc0x0123;
    uint32x4_t vpacc3x4567 = vpacc0x4567;
    uint32x4_t vpacc4x0123 = vpacc0x0123;
    uint32x4_t vpacc4x4567 = vpacc0x4567;
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);
    uint32x2_t vnacc3 = vmov_n_u32(0);
    uint32x2_t vnacc4 = vmov_n_u32(0);

    // Inner accumulation loop along the 8 columns.
    size_t k = kc;
    // 2x partial unrolled loop to load 8 bytes at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 5x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
      const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
      const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;

      // Load an 8x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
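      // Naming: vbKKKKxNNNN holds the packed weights for K positions KKKK
      // and output columns NNNN. vdotq_lane_u32 with lane 0 combines each
      // column's 4 weight bytes with activation bytes 0-3; lane 1 uses
      // activation bytes 4-7.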

      // Multiply-accumulate: 5x8 * 8x8 --> 5x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle up to 4 final positions of `k`
    if XNN_UNLIKELY(k != 0) {
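      // Because KC was rounded up to a multiple of 4 and the main loop
      // consumes 8 bytes at a time, exactly 4 bytes of K remain here. They
      // are loaded into the low half of a zeroed register, so only lane 0
      // of each activation vector carries data.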
      // Load a 5x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
      const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
      const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;

      // Load a 4x8 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 5x4 * 4x8 --> 5x8.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
      vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
      vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
      vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
      vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
      vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
    }

    // Apply the zero-point correction: fold the two lanes of each vnacc
    // into a single per-row sum, broadcast it across the columns, and
    // subtract it from the packed accumulators.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    vnacc3 = vpadd_u32(vnacc3, vnacc3);
    const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
    int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
    int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
    vnacc4 = vpadd_u32(vnacc4, vnacc4);
    const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
    int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
    int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));

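    // Requantize with the rndnu scheme: an arithmetic pre-shift, a
    // saturating doubling multiply returning the high half (VQDMULH) by the
    // fixed-point multiplier, then a rounding post-shift. Both shift
    // amounts are encoded as negative values, so the vector shifts below
    // act as right shifts.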
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);

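    // Narrow to 16 bits with saturation, add the output zero point, then
    // saturate-narrow to unsigned 8 bits. On AArch64 the *_high variants
    // pack both halves of a pair without an extra vcombine.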
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
    uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
    uint8x8_t vout4x01234567 = vqmovun_s16(vacc4x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);

    uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
    uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
    uint8x8_t vout4x01234567 = vqmovun_s16(vacc4x01234567);
#endif
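    // Clamp the 8-bit results to the caller-specified output range.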
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
    vout4x01234567 = vmax_u8(vout4x01234567, vget_low_u8(voutput_min));

    vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
    vout4x01234567 = vmin_u8(vout4x01234567, vget_low_u8(voutput_max));

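    // Store the 5x8 output tile. The full-width path writes 8 bytes per row,
    // advances the c pointers by cn_stride, and rewinds the a pointers by KC
    // for the next column group; the remainder path peels off 4-, 2-, and
    // 1-byte pieces with lane stores, shifting the vectors down after each.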
    if (nc >= 8) {
      vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
      vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
      vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
      vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
      vst1_u8(c4 + 0, vout4x01234567);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
      c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);
      a3 = (const uint8_t*) ((uintptr_t) a3 - kc);
      a4 = (const uint8_t*) ((uintptr_t) a4 - kc);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1_lane_u32((void*) c4, vreinterpret_u32_u8(vout4x01234567), 0); c4 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1_lane_u16((void*) c4, vreinterpret_u16_u8(vout4x01234567), 0); c4 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
        vst1_lane_u8(c4, vout4x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}