// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neonbf16.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

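// BF16 GEMM microkernel: computes a 3x4 tile of the output (MR=3 rows of A,
// NR=4 columns of B) with float32 accumulation and a min/max clamp, using the
// Armv8.6 BFDOT instruction. The "c8" in the name indicates that K is consumed
// in groups of 8 bfloat16 elements per dot-product step.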
void xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfdot(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

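  // Set up per-row pointers into A and C. When mr < 3, the pointers for the
  // missing rows alias the previous row, so the same code path runs for any
  // mr in [1, 3]; stores for duplicated rows simply overwrite each other.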
  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

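  // w walks the packed weights. As consumed here, each group of 4 output
  // columns starts with 4 bfloat16 bias values, followed by one 8-element
  // bfloat16 weight vector per column for each K group of 8.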
  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
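    // Load one bias value into lane 0 of each column accumulator; the other
    // three lanes start at zero, so after the final lane reduction the bias
    // is counted exactly once. Rows 1 and 2 start from the same biases.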
    float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;

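    // Main loop: each iteration consumes 8 bfloat16 values of K per row.
    // BFDOT accumulates pairwise products, so after the loop each float32
    // lane of vaccMxN holds a partial sum for output element (M, N).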
    size_t k = kc;
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      vacc0x0 = vbfdotq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfdotq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfdotq_f32(vacc2x0, va2, vb0);
      vacc0x1 = vbfdotq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfdotq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfdotq_f32(vacc2x1, va2, vb1);
      vacc0x2 = vbfdotq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfdotq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfdotq_f32(vacc2x2, va2, vb2);
      vacc0x3 = vbfdotq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfdotq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfdotq_f32(vacc2x3, va2, vb3);
    }
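    // Remainder of K (fewer than 8 elements): a full vector is still loaded
    // from each row of A, but A lanes are cleared wherever the packed B is
    // zero. The packed weights are presumably zero-padded past kc, so this
    // keeps out-of-range A data (which could be NaN/Inf) out of the sums.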
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));

      const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
      vacc0x0 = vbfdotq_f32(vacc0x0, va0x0, vb0);
      const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
      vacc1x0 = vbfdotq_f32(vacc1x0, va1x0, vb0);
      const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
      vacc2x0 = vbfdotq_f32(vacc2x0, va2x0, vb0);
      const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
      vacc0x1 = vbfdotq_f32(vacc0x1, va0x1, vb1);
      const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
      vacc1x1 = vbfdotq_f32(vacc1x1, va1x1, vb1);
      const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
      vacc2x1 = vbfdotq_f32(vacc2x1, va2x1, vb1);
      const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
      vacc0x2 = vbfdotq_f32(vacc0x2, va0x2, vb2);
      const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
      vacc1x2 = vbfdotq_f32(vacc1x2, va1x2, vb2);
      const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
      vacc2x2 = vbfdotq_f32(vacc2x2, va2x2, vb2);
      const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
      vacc0x3 = vbfdotq_f32(vacc0x3, va0x3, vb3);
      const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
      vacc1x3 = vbfdotq_f32(vacc1x3, va1x3, vb3);
      const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
      vacc2x3 = vbfdotq_f32(vacc2x3, va2x3, vb3);
    }

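    // Reduce each row's four per-column accumulators to one float32x4_t: on
    // AArch64, pairwise vpaddq_f32 folds the 4 lanes of each column and packs
    // the 4 column sums into lanes 0..3; the fallback path does the same with
    // 64-bit pairwise adds, since vpaddq_f32 is AArch64-only.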
    #if XNN_ARCH_ARM64
      const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
      const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
      const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
      const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
      const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
      const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);

      float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
      float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
      float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
    #else
      const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
      const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
      const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
      const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
      const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
      const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
      const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
      const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
      const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
      const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
      const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
      const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));

      float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
      float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
      float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
    #endif

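    // Clamp to [min, max] from the microkernel parameters (minmax activation).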
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);

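    // Convert the float32 results back to bfloat16 for storage.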
    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);

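    // Full-width store: write 4 bfloat16 outputs per row, advance C by
    // cn_stride, and rewind the A pointers by kc for the next column tile.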
    if XNN_LIKELY(nc >= 4) {
      vst1_bf16(c0, vout0x0123);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
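      // Partial tile (nc < 4): store 2 elements and shift the remaining lanes
      // down, then store 1 element, as the low bits of nc require. This is
      // the last column tile, so nc is cleared to end the outer loop.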
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}