// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neonbf16.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

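// bf16 GEMM microkernel: computes an up-to-4-row by 4-column tile of
// C = A * B using the Armv8.6 BF16 extension (BFMLALB/BFMLALT). Each of the
// 16 f32 accumulators gathers four partial sums for one output element and
// is reduced horizontally after the K loop. kc is given in bytes. The packed
// weights are consumed per 4-column group as 4 bf16 bias values followed by
// interleaved 8-element column slices for each block of K.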
void xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfmlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

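  // Set up one read pointer into A and one write pointer into C per row.
  // Rows past mr alias the previous row, so out-of-range rows read and write
  // valid (duplicated) memory instead of branching inside the hot loop.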
  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
  bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
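    // Initialize all 16 accumulators from the 4 bias values at the head of
    // the packed weights: each column's bias is loaded into lane 0 of an
    // otherwise zero vector and widened to f32, and every row starts from the
    // same per-column bias.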
    float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;
    float32x4_t vacc3x0 = vacc0x0;
    float32x4_t vacc3x1 = vacc0x1;
    float32x4_t vacc3x2 = vacc0x2;
    float32x4_t vacc3x3 = vacc0x3;

    size_t k = kc;
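    // Main loop: consume K in blocks of 8 bf16 elements (16 bytes) per row.
    // BFMLALB multiplies the even bf16 lanes and BFMLALT the odd lanes,
    // widening each product to f32 before accumulating.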
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3, vb0);
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3, vb1);
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3, vb2);
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3, vb3);

      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3, vb0);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3, vb1);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3, vb2);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3, vb3);
    }
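    // Remainder: fewer than 8 bf16 K elements are left. Full vectors are
    // still loaded, so A is over-read past kc; this stays correct because the
    // packed weights are presumably zero-padded past kc, and the masking
    // below clears the matching A lanes so stray non-finite values cannot
    // corrupt the accumulators (0 * NaN would otherwise yield NaN).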
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

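      // Build a per-column mask of the zero (padding) weight lanes, then use
      // BIC to clear the corresponding A lanes before the widening multiplies.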
      const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));

      const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
      const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
      const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2x0, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2x0, vb0);
      const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3x0, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3x0, vb0);
      const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
      const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
      const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2x1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2x1, vb1);
      const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3x1, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3x1, vb1);
      const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
      const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
      const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2x2, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2x2, vb2);
      const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3x2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3x2, vb2);
      const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
      const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
      const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2x3, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2x3, vb3);
      const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3x3, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3x3, vb3);
    }

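    // Horizontal reduction: collapse each accumulator's four partial sums to
    // a single f32 and gather the four column results of each row into one
    // vector. AArch64 uses pairwise vector adds; the AArch32 path reduces
    // 64-bit halves with vadd/vpadd instead.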
    #if XNN_ARCH_ARM64
      const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
      const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
      const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
      const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
      const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
      const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
      const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
      const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);

      float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
      float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
      float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
      float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
    #else
      const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
      const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
      const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
      const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
      const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
      const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
      const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
      const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
      const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
      const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
      const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
      const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
      const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
      const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
      const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
      const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));

      float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
      float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
      float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
      float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
    #endif

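    // Clamp the results to the caller-supplied [min, max] range.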
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);

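    // Narrow the clamped f32 results back to bf16 for storage.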
    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
    bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);

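    // Store: with at least 4 columns remaining, write full 4-element rows,
    // advance C by cn_stride, and rewind the A pointers by kc bytes for the
    // next column block. Otherwise write the 2- and 1-element tails, shifting
    // the already-written lanes out of the vector between the partial stores.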
    if XNN_LIKELY(nc >= 4) {
      vst1_bf16(c0, vout0x0123);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_bf16(c3, vout3x0123);
      c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
      a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
        vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
        vst1_lane_bf16(c3, vout3x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}