// Auto-generated file. Do not edit!
//   Template: src/qu8-gemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

void xnn_qu8_gemm_minmax_rndnu_ukernel_3x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

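  // Round kc up to a multiple of 4: each dot-product step consumes 4
  // consecutive bytes of A per lane. This is why the function is annotated
  // XNN_OOB_READS: the loads may touch up to 3 bytes past the caller's kc.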
  kc = round_up_po2(kc, 4 * sizeof(uint8_t));
  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
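  // If fewer rows remain than the tile height, alias the extra row pointers
  // onto the previous row so their loads and stores become harmless
  // duplicates instead of out-of-bounds accesses.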
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const uint8_t* a2 = (const uint8_t*) ((uintptr_t) a1 + a_stride);
  uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

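  // Broadcast the kernel (weight) zero point. For unsigned 8-bit GEMM,
  // sum(a * (b - b_zero_point)) = sum(a * b) - b_zero_point * sum(a), so the
  // correction term can be accumulated with the same dot-product instruction;
  // the activation zero point is assumed to be folded into the packed bias
  // by the weight-packing routine.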
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);

  // Loop over groups of 16 columns.
  do {
    // Initialize accumulators with bias. 16 bias values are loaded from the
    // weight matrix, at the start of the group of 16 columns.
    uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
    uint32x4_t vpacc1x0123 = vpacc0x0123;
    uint32x4_t vpacc1x4567 = vpacc0x4567;
    uint32x4_t vpacc1x89AB = vpacc0x89AB;
    uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
    uint32x4_t vpacc2x0123 = vpacc0x0123;
    uint32x4_t vpacc2x4567 = vpacc0x4567;
    uint32x4_t vpacc2x89AB = vpacc0x89AB;
    uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
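    // vnacc<M> accumulates kernel_zero_point * sum(a<M>), the zero-point
    // correction subtracted from row M's accumulators after the K loop.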
    uint32x2_t vnacc0 = vmov_n_u32(0);
    uint32x2_t vnacc1 = vmov_n_u32(0);
    uint32x2_t vnacc2 = vmov_n_u32(0);

    // Inner accumulation loop along K for this group of 16 columns.
    size_t k = kc;
    // Loop unrolled 2x to load 8 bytes of activations at a time.
    while (k >= 8 * sizeof(uint8_t)) {
      // Load a 3x8 block of activations.
      const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
      const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
      const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;

      // Load an 8x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 3x8 * 8x16 --> 3x16.
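      // Each vdotq_lane_u32 multiplies one 32-bit lane of activations
      // (4 consecutive bytes of K) by 16 packed weight bytes (4 bytes of K
      // for each of 4 columns) and adds the four dot products to the
      // corresponding 32-bit accumulator lanes.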
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);

      k -= 8 * sizeof(uint8_t);
    }
    // Handle the final 4 bytes of `k`, if any (kc is a multiple of 4, so the
    // remainder after the 8-byte loop is exactly 4 bytes).
    if XNN_UNLIKELY(k != 0) {
      // Load a 3x4 block of activations.
      const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
      const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
      const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;

      // Load a 4x16 block of weights.
      const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
      const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);

      // Multiply-accumulate: 3x4 * 4x16 --> 3x16.
      vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
      vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
      vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
      vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
      vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
      vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
      vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
      vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
      vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
      vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
      vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
      vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
      vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
      vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
      vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
    }

    // Subtract the zero-point correction (kernel_zero_point * sum(a)) from
    // each row's accumulators; the results are reinterpreted as signed.
    vnacc0 = vpadd_u32(vnacc0, vnacc0);
    const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
    int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
    int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
    int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
    int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
    vnacc1 = vpadd_u32(vnacc1, vnacc1);
    const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
    int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
    int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
    int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
    vnacc2 = vpadd_u32(vnacc2, vnacc2);
    const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
    int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
    int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
    int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
    int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));

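    // Requantize with the rndnu scheme: arithmetic right pre-shift,
    // saturating doubling multiply-high by a fixed-point multiplier, then a
    // rounding right post-shift. The parameter struct encodes both right
    // shifts as non-positive values, so the left-shift intrinsics below
    // effectively shift right.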
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);

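    // Fixed-point multiply: vqdmulhq_s32 computes (2 * acc * multiplier) >> 32
    // with saturation, keeping the high half of each doubled product.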
    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);

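    // Rounding shift by the post-shift amount (a rounding right shift, since
    // the stored amount is non-positive).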
    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);

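    // Narrow to 16 bits with signed saturation, add the output zero point,
    // then narrow to 8 bits with unsigned saturation. On AArch64 the
    // vqmovn_high/vqmovun_high forms pack both halves of the result without
    // extra register moves.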
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
    uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
    uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);

    uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
    uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
    uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
#endif
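    // Clamp the 8-bit outputs to the requested [output_min, output_max] range.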
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);

    vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);

    vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);

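    // Full 16-column tile: store all three rows, advance the output pointers
    // to the next column group, and rewind the A pointers by kc for the next
    // pass over the same rows.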
    if (nc >= 16) {
      vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
      vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);
      a2 = (const uint8_t*) ((uintptr_t) a2 - kc);

      nc -= 16;
    } else {
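      // Partial tile: rows 0 and 1 are packed into one vector (low half =
      // row 0, high half = row 1) so the remaining columns can be written
      // with 8-, 4-, 2-, and 1-byte stores, shifting the consumed bytes out
      // with vext after each step.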
      uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
      uint8x8_t vout2x01234567 = vget_low_u8(vout2x0123456789ABCDEF);
      if (nc & 8) {
        vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
        vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_u8(c2, vout2x01234567); c2 += 8;
        vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
        vout2x01234567 = vget_high_u8(vout2x0123456789ABCDEF);
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1_lane_u32((void*) c2, vreinterpret_u32_u8(vout2x01234567), 0); c2 += 4;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
        vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1_lane_u16((void*) c2, vreinterpret_u16_u8(vout2x01234567), 0); c2 += 2;
        vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
        vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
        vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
        vst1_lane_u8(c2, vout2x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}