// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

void xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

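  // Set up per-row pointers into A and C. When mr < 6, each unused row
  // pointer aliases the previous valid row, so its loads and stores become
  // harmless duplicates of that row.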
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  const __fp16* a1 = (const __fp16*) ((uintptr_t) a0 + a_stride);
  __fp16* c1 = (__fp16*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const __fp16* a2 = (const __fp16*) ((uintptr_t) a1 + a_stride);
  __fp16* c2 = (__fp16*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const __fp16* a3 = (const __fp16*) ((uintptr_t) a2 + a_stride);
  __fp16* c3 = (__fp16*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const __fp16* a4 = (const __fp16*) ((uintptr_t) a3 + a_stride);
  __fp16* c4 = (__fp16*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }
  const __fp16* a5 = (const __fp16*) ((uintptr_t) a4 + a_stride);
  __fp16* c5 = (__fp16*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    a5 = a4;
    c5 = c4;
  }

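  // Outer loop over blocks of up to 8 columns of C: each iteration computes
  // an mr x 8 tile (or an mr x nc tile on the final, partial iteration).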
  do {
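    // The packed weights w begin with a row of 8 bias values; initialize
    // every accumulator row from it before the K loop.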
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc1x01234567 = vacc0x01234567;
    float16x8_t vacc2x01234567 = vacc0x01234567;
    float16x8_t vacc3x01234567 = vacc0x01234567;
    float16x8_t vacc4x01234567 = vacc0x01234567;
    float16x8_t vacc5x01234567 = vacc0x01234567;

    size_t k = kc;
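    // Main loop: consume 4 halfwords of K per iteration. The 64-bit vld1_f16
    // loads of A below give the kernel its "ld64" suffix.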
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;
      const float16x4_t va1 = vld1_f16(a1); a1 += 4;
      const float16x4_t va2 = vld1_f16(a2); a2 += 4;
      const float16x4_t va3 = vld1_f16(a3); a3 += 4;
      const float16x4_t va4 = vld1_f16(a4); a4 += 4;
      const float16x4_t va5 = vld1_f16(a5); a5 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

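      // On AArch64 the per-lane broadcast of A is fused into the multiply-add
      // via vfmaq_lane_f16; on AArch32 it is done explicitly with
      // vdupq_lane_f16 followed by vfmaq_f16. The same pattern repeats for
      // lanes 1-3 below.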
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);
        const float16x8_t va1c0 = vdupq_lane_f16(va1, 0);
        const float16x8_t va2c0 = vdupq_lane_f16(va2, 0);
        const float16x8_t va3c0 = vdupq_lane_f16(va3, 0);
        const float16x8_t va4c0 = vdupq_lane_f16(va4, 0);
        const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c1, va1, 1);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c1, va2, 1);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c1, va3, 1);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c1, va4, 1);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c1, va5, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);
        const float16x8_t va1c1 = vdupq_lane_f16(va1, 1);
        const float16x8_t va2c1 = vdupq_lane_f16(va2, 1);
        const float16x8_t va3c1 = vdupq_lane_f16(va3, 1);
        const float16x8_t va4c1 = vdupq_lane_f16(va4, 1);
        const float16x8_t va5c1 = vdupq_lane_f16(va5, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c1, vb01234567c1);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c1, vb01234567c1);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c1, vb01234567c1);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c1, vb01234567c1);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c1, vb01234567c1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c2, va1, 2);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c2, va2, 2);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c2, va3, 2);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c2, va4, 2);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c2, va5, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);
        const float16x8_t va1c2 = vdupq_lane_f16(va1, 2);
        const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
        const float16x8_t va3c2 = vdupq_lane_f16(va3, 2);
        const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);
        const float16x8_t va5c2 = vdupq_lane_f16(va5, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c2, vb01234567c2);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c2, vb01234567c2);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c2, vb01234567c2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c3, va1, 3);
        vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c3, va2, 3);
        vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c3, va3, 3);
        vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c3, va4, 3);
        vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c3, va5, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);
        const float16x8_t va1c3 = vdupq_lane_f16(va1, 3);
        const float16x8_t va2c3 = vdupq_lane_f16(va2, 3);
        const float16x8_t va3c3 = vdupq_lane_f16(va3, 3);
        const float16x8_t va4c3 = vdupq_lane_f16(va4, 3);
        const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c3, vb01234567c3);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c3, vb01234567c3);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c3, vb01234567c3);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c3, vb01234567c3);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
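    // Remainder loop: handle the final 1-3 halfwords of K one element at a
    // time, broadcasting each A value across all 8 lanes.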
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;
        const float16x8_t va1 = vld1q_dup_f16(a1); a1 += 1;
        const float16x8_t va2 = vld1q_dup_f16(a2); a2 += 1;
        const float16x8_t va3 = vld1q_dup_f16(a3); a3 += 1;
        const float16x8_t va4 = vld1q_dup_f16(a4); a4 += 1;
        const float16x8_t va5 = vld1q_dup_f16(a5); a5 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1, vb01234567);
        vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2, vb01234567);
        vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3, vb01234567);
        vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4, vb01234567);
        vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5, vb01234567);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

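    // Clamp the accumulators to the [min, max] range from params before
    // storing.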
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc1x01234567 = vminq_f16(vacc1x01234567, vmax);
    vacc2x01234567 = vminq_f16(vacc2x01234567, vmax);
    vacc3x01234567 = vminq_f16(vacc3x01234567, vmax);
    vacc4x01234567 = vminq_f16(vacc4x01234567, vmax);
    vacc5x01234567 = vminq_f16(vacc5x01234567, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc1x01234567 = vmaxq_f16(vacc1x01234567, vmin);
    vacc2x01234567 = vmaxq_f16(vacc2x01234567, vmin);
    vacc3x01234567 = vmaxq_f16(vacc3x01234567, vmin);
    vacc4x01234567 = vmaxq_f16(vacc4x01234567, vmin);
    vacc5x01234567 = vmaxq_f16(vacc5x01234567, vmin);

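    // Common case: at least 8 columns remain. Store full 8-wide rows, then
    // rewind the A pointers by kc bytes for the next column block.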
    if XNN_LIKELY(nc >= 8) {
      vst1q_f16(c0, vacc0x01234567);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);
      vst1q_f16(c1, vacc1x01234567);
      c1 = (__fp16*) ((uintptr_t) c1 + cn_stride);
      vst1q_f16(c2, vacc2x01234567);
      c2 = (__fp16*) ((uintptr_t) c2 + cn_stride);
      vst1q_f16(c3, vacc3x01234567);
      c3 = (__fp16*) ((uintptr_t) c3 + cn_stride);
      vst1q_f16(c4, vacc4x01234567);
      c4 = (__fp16*) ((uintptr_t) c4 + cn_stride);
      vst1q_f16(c5, vacc5x01234567);
      c5 = (__fp16*) ((uintptr_t) c5 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);
      a1 = (const __fp16*) ((uintptr_t) a1 - kc);
      a2 = (const __fp16*) ((uintptr_t) a2 - kc);
      a3 = (const __fp16*) ((uintptr_t) a3 - kc);
      a4 = (const __fp16*) ((uintptr_t) a4 - kc);
      a5 = (const __fp16*) ((uintptr_t) a5 - kc);

      nc -= 8;
    } else {
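      // Partial store for the last 1-7 columns: write 4, 2, and 1 halfwords
      // as indicated by the bits of nc, shifting the remaining lanes down
      // after each piece.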
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      float16x4_t vacc1x0123 = vget_low_f16(vacc1x01234567);
      float16x4_t vacc2x0123 = vget_low_f16(vacc2x01234567);
      float16x4_t vacc3x0123 = vget_low_f16(vacc3x01234567);
      float16x4_t vacc4x0123 = vget_low_f16(vacc4x01234567);
      float16x4_t vacc5x0123 = vget_low_f16(vacc5x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;
        vst1_f16(c1, vacc1x0123); c1 += 4;
        vst1_f16(c2, vacc2x0123); c2 += 4;
        vst1_f16(c3, vacc3x0123); c3 += 4;
        vst1_f16(c4, vacc4x0123); c4 += 4;
        vst1_f16(c5, vacc5x0123); c5 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
        vacc1x0123 = vget_high_f16(vacc1x01234567);
        vacc2x0123 = vget_high_f16(vacc2x01234567);
        vacc3x0123 = vget_high_f16(vacc3x01234567);
        vacc4x0123 = vget_high_f16(vacc4x01234567);
        vacc5x0123 = vget_high_f16(vacc5x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_f16(vacc1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_f16(vacc2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_f16(vacc3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_f16(vacc4x0123), 0); c4 += 2;
        vst1_lane_u32((void*) c5, vreinterpret_u32_f16(vacc5x0123), 0); c5 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
        vacc1x0123 = vext_f16(vacc1x0123, vacc1x0123, 2);
        vacc2x0123 = vext_f16(vacc2x0123, vacc2x0123, 2);
        vacc3x0123 = vext_f16(vacc3x0123, vacc3x0123, 2);
        vacc4x0123 = vext_f16(vacc4x0123, vacc4x0123, 2);
        vacc5x0123 = vext_f16(vacc5x0123, vacc5x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
        vst1_lane_f16(c1, vacc1x0123, 0);
        vst1_lane_f16(c2, vacc2x0123, 0);
        vst1_lane_f16(c3, vacc3x0123, 0);
        vst1_lane_f16(c4, vacc4x0123, 0);
        vst1_lane_f16(c5, vacc5x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}