// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>

void xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void** restrict a,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(ks != 0);
  assert(ks % (8 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

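  // Set up 8 output row pointers, cm_stride apart. For partial tiles (mr < 8)
  // the excess pointers alias the previous row, so those rows are written
  // redundantly rather than out of bounds.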
  __fp16* c0 = (__fp16*) c;
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  __fp16* c6 = (__fp16*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  __fp16* c7 = (__fp16*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

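  // Outer loop: each iteration computes an 8-row x 8-column tile of the
  // output, then steps 8 columns to the right until nc is exhausted.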
  do {
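    // Initialize all 8 row accumulators from the bias, stored at the start of
    // the packed weights w ahead of the 8x8 weight panel.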
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc5x01234567 = vacc0x01234567;
    float16x8_t vacc6x01234567 = vacc0x01234567;
    float16x8_t vacc7x01234567 = vacc0x01234567;

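    // Walk the indirection buffer: each iteration consumes 8 input row
    // pointers and accumulates one kc-long dot-product slice per output row.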
    size_t p = ks;
    do {
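      // Fetch the 8 input row pointers. A pointer equal to the sentinel
      // `zero` references the zero (padding) buffer and must not be shifted
      // by a_offset.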
      const __fp16* restrict a0 = (const __fp16*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const __fp16*) ((uintptr_t) a0 + a_offset);
      }
      const __fp16* restrict a1 = (const __fp16*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const __fp16*) ((uintptr_t) a1 + a_offset);
      }
      const __fp16* restrict a2 = (const __fp16*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const __fp16*) ((uintptr_t) a2 + a_offset);
      }
      const __fp16* restrict a3 = (const __fp16*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const __fp16*) ((uintptr_t) a3 + a_offset);
      }
      const __fp16* restrict a4 = (const __fp16*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const __fp16*) ((uintptr_t) a4 + a_offset);
      }
      const __fp16* restrict a5 = (const __fp16*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const __fp16*) ((uintptr_t) a5 + a_offset);
      }
      const __fp16* restrict a6 = (const __fp16*) a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const __fp16*) ((uintptr_t) a6 + a_offset);
      }
      const __fp16* restrict a7 = (const __fp16*) a[7];
      assert(a7 != NULL);
      if XNN_UNPREDICTABLE(a7 != zero) {
        a7 = (const __fp16*) ((uintptr_t) a7 + a_offset);
      }
      a += 8;

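      // Main loop: process 4 half-precision elements of each A row per
      // iteration via 64-bit vld1_f16 loads (the kernel's "ld64" variant).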
      size_t k = kc;
      for (; k >= 4 * sizeof(__fp16); k -= 4 * sizeof(__fp16)) {
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;
        const float16x4_t va1 = vld1_f16(a1); a1 += 4;
        const float16x4_t va2 = vld1_f16(a2); a2 += 4;
        const float16x4_t va3 = vld1_f16(a3); a3 += 4;
        const float16x4_t va4 = vld1_f16(a4); a4 += 4;
        const float16x4_t va5 = vld1_f16(a5); a5 += 4;
        const float16x4_t va6 = vld1_f16(a6); a6 += 4;
        const float16x4_t va7 = vld1_f16(a7); a7 += 4;

        const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

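        // On AArch64, vfmaq_lane_f16 multiplies by one lane of va directly.
        // AArch32 lacks that form, so each lane is first broadcast with
        // vdupq_lane_f16 and fed to a plain vector FMA.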
        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
          vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0);
          vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
        #else
          const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
          const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
          const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
          const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
          const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
          const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);
          const float16x8_t va6c0 = vdupq_lane_f16(va6, 0);
          const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
          vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c0, vb01234567c0);
          vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c0, vb01234567c0);
        #endif
        const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
          vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c1, va6, 1);
          vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
        #else
          const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
          const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
          const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
          const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
          const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
          const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);
          const float16x8_t va6c1 = vdupq_lane_f16(va6, 1);
          const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
          vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c1, vb01234567c1);
          vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c1, vb01234567c1);
        #endif
        const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
          vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c2, va6, 2);
          vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
        #else
          const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
          const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
          const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
          const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
          const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
          const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);
          const float16x8_t va6c2 = vdupq_lane_f16(va6, 2);
          const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
          vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c2, vb01234567c2);
          vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c2, vb01234567c2);
        #endif
        const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
          vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c3, va6, 3);
          vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
        #else
          const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
          const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
          const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
          const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
          const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
          const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);
          const float16x8_t va6c3 = vdupq_lane_f16(va6, 3);
          const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
          vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c3, vb01234567c3);
          vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c3, vb01234567c3);
        #endif
      }
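      // Remainder loop: handle the kc % 4 leftover elements one at a time,
      // broadcasting each A element across a full vector.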
      if XNN_UNLIKELY(k != 0) {
        do {
          const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
          const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
          const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
          const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
          const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
          const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;
          const float16x8_t va6 = vld1q_dup_f16(a6); a6 += 1;
          const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;

          const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
          vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6, vb01234567);
          vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7, vb01234567);

          k -= sizeof(__fp16);
        } while (k != 0);
      }
      p -= 8 * sizeof(void*);
    } while (p != 0);


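    // Clamp the accumulators to [min, max]: the half-precision bounds are
    // stored as uint16_t in params, so they are broadcast-loaded as u16 and
    // reinterpreted as fp16.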
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc6x01234567 = vminq_f16(vacc6x01234567, vmax);
    vacc7x01234567 = vminq_f16(vacc7x01234567, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc6x01234567 = vmaxq_f16(vacc6x01234567, vmin);
    vacc7x01234567 = vmaxq_f16(vacc7x01234567, vmin);

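    // Common case: at least 8 columns remain, so store full vectors, advance
    // each row pointer by cn_stride, and rewind the indirection buffer by ks
    // so the same A pointers are reused for the next column tile.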
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c7, vacc7x01234567);
      c7 = (__fp16*) ((uintptr_t) c7 + cn_stride);
      vst1q_f16(c6, vacc6x01234567);
      c6 = (__fp16*) ((uintptr_t) c6 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
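      // Tail: fewer than 8 columns left. Store progressively smaller pieces
      // (4, then 2, then 1 element), shifting the surviving lanes down after
      // each partial store.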
      float16x4_t vacc7x0123 = vget_low_f16(vacc7x01234567);
      float16x4_t vacc6x0123 = vget_low_f16(vacc6x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      if (nc & 4) {
        vst1_f16(c7, vacc7x0123); c7 += 4;
        vst1_f16(c6, vacc6x0123); c6 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c0, vacc0x0123); c0 += 4;

        vacc7x0123 = vget_high_f16(vacc7x01234567);
        vacc6x0123 = vget_high_f16(vacc6x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc0x0123 = vget_high_f16(vacc0x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c7, vreinterpret_u32_f16(vacc7x0123), 0); c7 += 2;
        vst1_lane_u32((void*) c6, vreinterpret_u32_f16(vacc6x0123), 0); c6 += 2;
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;

        vacc7x0123 = vext_f16(vacc7x0123, vacc7x0123, 2);
        vacc6x0123 = vext_f16(vacc6x0123, vacc6x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c7, vacc7x0123, 0);
        vst1_lane_f16(c6, vacc6x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}