// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>

void xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void** restrict a,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

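  // Compute a pointer to the start of each of the 6 output rows. When mr < 6,
  // the out-of-range row pointers alias the previous valid row, so the extra
  // rows are computed but their stores land harmlessly on valid memory.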
  __fp16* c0 = (__fp16*) c;
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

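  // Outer loop over output columns: each iteration produces one tile of C,
  // up to mr rows by 16 columns wide.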
  do {
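    // Initialize all 6 rows of accumulators from the first 16 packed values
    // of w (the bias in XNNPACK's weight packing layout).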
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc0x89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc1x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc2x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc3x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc4x89ABCDEF = vacc0x89ABCDEF;
    float16x8_t vacc5x01234567 = vacc0x01234567;
    float16x8_t vacc5x89ABCDEF = vacc0x89ABCDEF;

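    // Loop over the indirection buffer: each step consumes 6 A-row pointers
    // and accumulates one kc-deep slice into the tile. Pointers equal to
    // `zero` point at a shared zero buffer and are not offset.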
    size_t p = ks;
    do {
      const __fp16* restrict a0 = (const __fp16*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const __fp16*) ((uintptr_t) a0 + a_offset);
      }
      const __fp16* restrict a1 = (const __fp16*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const __fp16*) ((uintptr_t) a1 + a_offset);
      }
      const __fp16* restrict a2 = (const __fp16*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const __fp16*) ((uintptr_t) a2 + a_offset);
      }
      const __fp16* restrict a3 = (const __fp16*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const __fp16*) ((uintptr_t) a3 + a_offset);
      }
      const __fp16* restrict a4 = (const __fp16*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const __fp16*) ((uintptr_t) a4 + a_offset);
      }
      const __fp16* restrict a5 = (const __fp16*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const __fp16*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

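      // Main loop ("ld64"): load 4 half-precision values (64 bits) per A row,
      // then multiply each lane against 4 consecutive groups of 16 weights.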
      size_t k = kc;
      for (; k >= 4 * sizeof(__fp16); k -= 4 * sizeof(__fp16)) {
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;
        const float16x4_t va1 = vld1_f16(a1); a1 += 4;
        const float16x4_t va2 = vld1_f16(a2); a2 += 4;
        const float16x4_t va3 = vld1_f16(a3); a3 += 4;
        const float16x4_t va4 = vld1_f16(a4); a4 += 4;
        const float16x4_t va5 = vld1_f16(a5); a5 += 4;

        const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEFc0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
          vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
          vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc0, va1, 0);
          vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc0, va2, 0);
          vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc0, va3, 0);
          vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc0, va4, 0);
          vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc0, va5, 0);
        #else
          const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
          const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
          const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
          const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
          const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
          const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
          vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c0, vb89ABCDEFc0);
          vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c0, vb89ABCDEFc0);
          vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c0, vb89ABCDEFc0);
          vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c0, vb89ABCDEFc0);
          vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c0, vb89ABCDEFc0);
          vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);
        #endif
        const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEFc1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
          vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
          vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc1, va1, 1);
          vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc1, va2, 1);
          vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc1, va3, 1);
          vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc1, va4, 1);
          vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc1, va5, 1);
        #else
          const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
          const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
          const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
          const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
          const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
          const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
          vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
          vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c1, vb89ABCDEFc1);
          vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c1, vb89ABCDEFc1);
          vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c1, vb89ABCDEFc1);
          vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c1, vb89ABCDEFc1);
          vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c1, vb89ABCDEFc1);
        #endif
        const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEFc2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
          vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
          vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc2, va1, 2);
          vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc2, va2, 2);
          vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc2, va3, 2);
          vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc2, va4, 2);
          vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc2, va5, 2);
        #else
          const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
          const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
          const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
          const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
          const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
          const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
          vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c2, vb89ABCDEFc2);
          vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c2, vb89ABCDEFc2);
          vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c2, vb89ABCDEFc2);
          vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c2, vb89ABCDEFc2);
          vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
          vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c2, vb89ABCDEFc2);
        #endif
        const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEFc3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
          vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
          vacc1x89ABCDEF = vfmaq_lane_f16(vacc1x89ABCDEF, vb89ABCDEFc3, va1, 3);
          vacc2x89ABCDEF = vfmaq_lane_f16(vacc2x89ABCDEF, vb89ABCDEFc3, va2, 3);
          vacc3x89ABCDEF = vfmaq_lane_f16(vacc3x89ABCDEF, vb89ABCDEFc3, va3, 3);
          vacc4x89ABCDEF = vfmaq_lane_f16(vacc4x89ABCDEF, vb89ABCDEFc3, va4, 3);
          vacc5x89ABCDEF = vfmaq_lane_f16(vacc5x89ABCDEF, vb89ABCDEFc3, va5, 3);
        #else
          const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
          const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
          const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
          const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
          const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
          const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
          vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c3, vb89ABCDEFc3);
          vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1c3, vb89ABCDEFc3);
          vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2c3, vb89ABCDEFc3);
          vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3c3, vb89ABCDEFc3);
          vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c3, vb89ABCDEFc3);
          vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);
        #endif
      }
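      // Remainder loop: process the last 1-3 elements of k one at a time,
      // broadcasting each A value across a full vector.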
      if XNN_UNLIKELY(k != 0) {
        do {
          const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
          const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
          const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
          const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
          const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
          const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

          const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
          const float16x8_t vb89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);
          vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);
          vacc1x89ABCDEF = vfmaq_f16(vacc1x89ABCDEF, va1, vb89ABCDEF);
          vacc2x89ABCDEF = vfmaq_f16(vacc2x89ABCDEF, va2, vb89ABCDEF);
          vacc3x89ABCDEF = vfmaq_f16(vacc3x89ABCDEF, va3, vb89ABCDEF);
          vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4, vb89ABCDEF);
          vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5, vb89ABCDEF);

          k -= sizeof(__fp16);
        } while (k != 0);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

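    // Apply the min/max output clamping from params.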
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);
    vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);
    vacc1x89ABCDEF = vminq_f16(vacc1x89ABCDEF, vmax);
    vacc2x89ABCDEF = vminq_f16(vacc2x89ABCDEF, vmax);
    vacc3x89ABCDEF = vminq_f16(vacc3x89ABCDEF, vmax);
    vacc4x89ABCDEF = vminq_f16(vacc4x89ABCDEF, vmax);
    vacc5x89ABCDEF = vminq_f16(vacc5x89ABCDEF, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);
    vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);
    vacc1x89ABCDEF = vmaxq_f16(vacc1x89ABCDEF, vmin);
    vacc2x89ABCDEF = vmaxq_f16(vacc2x89ABCDEF, vmin);
    vacc3x89ABCDEF = vmaxq_f16(vacc3x89ABCDEF, vmin);
    vacc4x89ABCDEF = vmaxq_f16(vacc4x89ABCDEF, vmin);
    vacc5x89ABCDEF = vmaxq_f16(vacc5x89ABCDEF, vmin);

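    // Store the tile. The fast path writes full 16-wide rows; the tail path
    // narrows the stores through 8-, 4-, 2-, and 1-element steps for nc < 16.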
    if XNN_LIKELY(nc >= 16) {
      vst1q_f16(c5, vacc5x01234567);
      vst1q_f16(c5 + 8, vacc5x89ABCDEF);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      vst1q_f16(c4 + 8, vacc4x89ABCDEF);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      vst1q_f16(c3 + 8, vacc3x89ABCDEF);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      vst1q_f16(c2 + 8, vacc2x89ABCDEF);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      vst1q_f16(c1 + 8, vacc1x89ABCDEF);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c0, vacc0x01234567);
      vst1q_f16(c0 + 8, vacc0x89ABCDEF);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 16;
    } else {
      if (nc & 8) {
        vst1q_f16(c5, vacc5x01234567); c5 += 8;
        vst1q_f16(c4, vacc4x01234567); c4 += 8;
        vst1q_f16(c3, vacc3x01234567); c3 += 8;
        vst1q_f16(c2, vacc2x01234567); c2 += 8;
        vst1q_f16(c1, vacc1x01234567); c1 += 8;
        vst1q_f16(c0, vacc0x01234567); c0 += 8;

        vacc5x01234567 = vacc5x89ABCDEF;
        vacc4x01234567 = vacc4x89ABCDEF;
        vacc3x01234567 = vacc3x89ABCDEF;
        vacc2x01234567 = vacc2x89ABCDEF;
        vacc1x01234567 = vacc1x89ABCDEF;
        vacc0x01234567 = vacc0x89ABCDEF;
      }
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      if (nc & 4) {
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc0x0123 = vget_high_f16(vacc0x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;

        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}