// Auto-generated file. Do not edit!
//   Template: src/f16-igemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>

void xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const void** restrict a,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const void* zero,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

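  // Set up one output pointer per row. When mr < 6, the pointers for the
  // missing rows alias the last valid row, so their stores land on memory a
  // live row also writes instead of going out of bounds.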
  __fp16* c0 = (__fp16*) c;
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

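  // Outer loop: produce the output tile 8 columns at a time until nc is exhausted.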
  do {
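    // w points at the packed weights with the bias first; all 6 row
    // accumulators start from the same bias vector.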
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc5x01234567 = vacc0x01234567;

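    // Loop over the indirection buffer: each iteration consumes 6 A-row
    // pointers (ks is a multiple of 6 * sizeof(void*)) and accumulates one
    // kc-long slice into the same accumulators. Rows that point at the zero
    // buffer skip the a_offset adjustment.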
    size_t p = ks;
    do {
      const __fp16* restrict a0 = (const __fp16*) a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const __fp16*) ((uintptr_t) a0 + a_offset);
      }
      const __fp16* restrict a1 = (const __fp16*) a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const __fp16*) ((uintptr_t) a1 + a_offset);
      }
      const __fp16* restrict a2 = (const __fp16*) a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const __fp16*) ((uintptr_t) a2 + a_offset);
      }
      const __fp16* restrict a3 = (const __fp16*) a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const __fp16*) ((uintptr_t) a3 + a_offset);
      }
      const __fp16* restrict a4 = (const __fp16*) a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const __fp16*) ((uintptr_t) a4 + a_offset);
      }
      const __fp16* restrict a5 = (const __fp16*) a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const __fp16*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

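      // Main K loop ("ld64"): load 64 bits (4 halfwords) of each A row per
      // iteration and consume 4 packed rows of B.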
      size_t k = kc;
      for (; k >= 4 * sizeof(__fp16); k -= 4 * sizeof(__fp16)) {
        const float16x4_t va0 = vld1_f16(a0); a0 += 4;
        const float16x4_t va1 = vld1_f16(a1); a1 += 4;
        const float16x4_t va2 = vld1_f16(a2); a2 += 4;
        const float16x4_t va3 = vld1_f16(a3); a3 += 4;
        const float16x4_t va4 = vld1_f16(a4); a4 += 4;
        const float16x4_t va5 = vld1_f16(a5); a5 += 4;

        const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

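        // On AArch64, FMLA by element multiplies B by one A lane directly;
        // on other targets the lane is broadcast first and a plain vector
        // FMA is used.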
        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
        #else
          const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
          const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
          const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
          const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
          const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
          const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
        #endif
        const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
        #else
          const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
          const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
          const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
          const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
          const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
          const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
        #endif
        const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
        #else
          const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
          const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
          const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
          const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
          const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
          const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
        #endif
        const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
          vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
          vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
          vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
          vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
          vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
        #else
          const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
          const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
          const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
          const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
          const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
          const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
        #endif
      }
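      // Remainder: process the last 1-3 halfwords of kc one element at a
      // time, broadcasting each A element across the whole vector.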
      if XNN_UNLIKELY(k != 0) {
        do {
          const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
          const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
          const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
          const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
          const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
          const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

          const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

          vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
          vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
          vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
          vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
          vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
          vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);

          k -= sizeof(__fp16);
        } while (k != 0);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

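    // Clamp the accumulators to [min, max]; the bounds are stored in params
    // as raw uint16_t bit patterns and reinterpreted as FP16 here.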
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);

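    // Full tile (nc >= 8): store all 8 columns of every row, highest row
    // first, rewind the indirection pointer by ks for the next column block,
    // and advance the output pointers by cn_stride.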
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);

      a = (const void**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
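      // Partial tile (nc < 8): store 4, 2, then 1 column(s) according to the
      // set bits of nc, shifting the remaining lanes down after each store.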
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      if (nc & 4) {
        vst1_f16(c5, vacc5x0123); c5 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c0, vacc0x0123); c0 += 4;

        vacc5x0123 = vget_high_f16(vacc5x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc0x0123 = vget_high_f16(vacc0x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;

        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c5, vacc5x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}