// Auto-generated file. Do not edit!
//   Template: src/f16-gemm/neonfp16arith-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>

void xnn_f16_gemm_minmax_ukernel_1x16__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;

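  // Outer loop: one iteration per 16-column block of the output row. The
  // accumulators are initialized from the packed weights stream, which in
  // XNNPACK's layout starts with the bias values for the current block.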
  do {
    float16x8_t vacc0x01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
    float16x8_t vacc0x89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

    size_t k = kc;
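    // Main loop: load 4 half-precision elements of A with a single 64-bit load
    // (hence "ld64") and multiply-accumulate each lane against one group of 16
    // packed weights.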
    while (k >= 4 * sizeof(__fp16)) {
      const float16x4_t va0 = vld1_f16(a0); a0 += 4;

      const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

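      // The AArch64 path multiplies by one lane of va0 directly with
      // vfmaq_lane_f16; the fallback first broadcasts that lane with
      // vdupq_lane_f16 and then uses a plain vfmaq_f16.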
      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc0, va0, 0);
      #else
        const float16x8_t va0c0 = vdupq_lane_f16(va0, 0);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c0, vb89ABCDEFc0);
      #endif
      const float16x8_t vb01234567c1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc1 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c1, va0, 1);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc1, va0, 1);
      #else
        const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
      #endif
      const float16x8_t vb01234567c2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc2 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c2, va0, 2);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc2, va0, 2);
      #else
        const float16x8_t va0c2 = vdupq_lane_f16(va0, 2);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c2, vb01234567c2);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c2, vb89ABCDEFc2);
      #endif
      const float16x8_t vb01234567c3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      const float16x8_t vb89ABCDEFc3 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

      #if XNN_ARCH_ARM64
        vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c3, va0, 3);
        vacc0x89ABCDEF = vfmaq_lane_f16(vacc0x89ABCDEF, vb89ABCDEFc3, va0, 3);
      #else
        const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c3, vb89ABCDEFc3);
      #endif

      k -= 4 * sizeof(__fp16);
    }
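    // Remainder loop: handle the kc % 4 leftover elements of A one at a time,
    // broadcasting each scalar across a full vector before accumulating.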
    if XNN_UNLIKELY(k != 0) {
      do {
        const float16x8_t va0 = vld1q_dup_f16(a0); a0 += 1;

        const float16x8_t vb01234567 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
        const float16x8_t vb89ABCDEF = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0, vb01234567);
        vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0, vb89ABCDEF);

        k -= sizeof(__fp16);
      } while (k != 0);
    }

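    // Clamp the accumulators to the [min, max] output range requested in params.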
    const float16x8_t vmax = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.max));
    vacc0x01234567 = vminq_f16(vacc0x01234567, vmax);
    vacc0x89ABCDEF = vminq_f16(vacc0x89ABCDEF, vmax);

    const float16x8_t vmin = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neon.min));
    vacc0x01234567 = vmaxq_f16(vacc0x01234567, vmin);
    vacc0x89ABCDEF = vmaxq_f16(vacc0x89ABCDEF, vmin);

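    // Store the results. The common case writes two full vectors, rewinds a0 by
    // kc so the same row of A is reused, and advances c0 to the next 16-column
    // block; the final partial block stores 8/4/2/1 elements according to the
    // low bits of nc.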
    if XNN_LIKELY(nc >= 16) {
      vst1q_f16(c0, vacc0x01234567);
      vst1q_f16(c0 + 8, vacc0x89ABCDEF);
      c0 = (__fp16*) ((uintptr_t) c0 + cn_stride);

      a0 = (const __fp16*) ((uintptr_t) a0 - kc);

      nc -= 16;
    } else {
      if (nc & 8) {
        vst1q_f16(c0, vacc0x01234567); c0 += 8;

        vacc0x01234567 = vacc0x89ABCDEF;
      }
      float16x4_t vacc0x0123 = vget_low_f16(vacc0x01234567);
      if (nc & 4) {
        vst1_f16(c0, vacc0x0123); c0 += 4;

        vacc0x0123 = vget_high_f16(vacc0x01234567);
      }
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_f16(vacc0x0123), 0); c0 += 2;

        vacc0x0123 = vext_f16(vacc0x0123, vacc0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_f16(c0, vacc0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}