// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neonbf16.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

void xnn_bf16_gemm_minmax_ukernel_5x4c8__neonbf16_bfmlal(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 5);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(bfloat16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

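  // Set up per-row pointers into A and C. When mr < 5, trailing row pointers
  // alias the last valid row, so the extra rows recompute duplicate results
  // in place instead of touching out-of-bounds memory.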
  const bfloat16_t* a0 = (const bfloat16_t*) a;
  bfloat16_t* c0 = (bfloat16_t*) c;
  const bfloat16_t* a1 = (const bfloat16_t*) ((uintptr_t) a0 + a_stride);
  bfloat16_t* c1 = (bfloat16_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const bfloat16_t* a2 = (const bfloat16_t*) ((uintptr_t) a1 + a_stride);
  bfloat16_t* c2 = (bfloat16_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const bfloat16_t* a3 = (const bfloat16_t*) ((uintptr_t) a2 + a_stride);
  bfloat16_t* c3 = (bfloat16_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    a3 = a2;
    c3 = c2;
  }
  const bfloat16_t* a4 = (const bfloat16_t*) ((uintptr_t) a3 + a_stride);
  bfloat16_t* c4 = (bfloat16_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    a4 = a3;
    c4 = c3;
  }

  const bfloat16_t* w = (const bfloat16_t*) w_ptr;
  do {
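    // Initialize accumulators from the packed weights: the first 4 bf16 values
    // are the per-column biases. Each bias is loaded into lane 0 of an
    // otherwise-zero vector, so the horizontal reduction below counts it
    // exactly once.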
    float32x4_t vacc0x0 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x1 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x2 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc0x3 = vcvt_f32_bf16(vld1_lane_bf16(w, vreinterpret_bf16_u16(vdup_n_u16(0)), 0)); w += 1;
    float32x4_t vacc1x0 = vacc0x0;
    float32x4_t vacc1x1 = vacc0x1;
    float32x4_t vacc1x2 = vacc0x2;
    float32x4_t vacc1x3 = vacc0x3;
    float32x4_t vacc2x0 = vacc0x0;
    float32x4_t vacc2x1 = vacc0x1;
    float32x4_t vacc2x2 = vacc0x2;
    float32x4_t vacc2x3 = vacc0x3;
    float32x4_t vacc3x0 = vacc0x0;
    float32x4_t vacc3x1 = vacc0x1;
    float32x4_t vacc3x2 = vacc0x2;
    float32x4_t vacc3x3 = vacc0x3;
    float32x4_t vacc4x0 = vacc0x0;
    float32x4_t vacc4x1 = vacc0x1;
    float32x4_t vacc4x2 = vacc0x2;
    float32x4_t vacc4x3 = vacc0x3;

    size_t k = kc;
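    // Main loop: consume 8 bf16 values of K per iteration (the "c8" layout).
    // BFMLALB accumulates widened products of the even-indexed bf16 lanes and
    // BFMLALT those of the odd-indexed lanes.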
    for (; k >= 8 * sizeof(bfloat16_t); k -= 8 * sizeof(bfloat16_t)) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 += 8;
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 += 8;
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 += 8;
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 += 8;
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 += 8;

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3, vb0);
      vacc4x0 = vbfmlalbq_f32(vacc4x0, va4, vb0);
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3, vb1);
      vacc4x1 = vbfmlalbq_f32(vacc4x1, va4, vb1);
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3, vb2);
      vacc4x2 = vbfmlalbq_f32(vacc4x2, va4, vb2);
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3, vb3);
      vacc4x3 = vbfmlalbq_f32(vacc4x3, va4, vb3);

      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3, vb0);
      vacc4x0 = vbfmlaltq_f32(vacc4x0, va4, vb0);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3, vb1);
      vacc4x1 = vbfmlaltq_f32(vacc4x1, va4, vb1);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3, vb2);
      vacc4x2 = vbfmlaltq_f32(vacc4x2, va4, vb2);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3, vb3);
      vacc4x3 = vbfmlaltq_f32(vacc4x3, va4, vb3);
    }
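    // Remainder: A is over-read by a full vector, but the packed weights are
    // zero-padded past kc. Zeroing the A lanes wherever the corresponding B
    // lane is zero keeps garbage (possibly Inf/NaN) times 0 from producing
    // NaN in the accumulators.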
    if XNN_UNLIKELY(k != 0) {
      const bfloat16x8_t va0 = vld1q_bf16(a0); a0 = (const bfloat16_t*) ((uintptr_t) a0 + k);
      const bfloat16x8_t va1 = vld1q_bf16(a1); a1 = (const bfloat16_t*) ((uintptr_t) a1 + k);
      const bfloat16x8_t va2 = vld1q_bf16(a2); a2 = (const bfloat16_t*) ((uintptr_t) a2 + k);
      const bfloat16x8_t va3 = vld1q_bf16(a3); a3 = (const bfloat16_t*) ((uintptr_t) a3 + k);
      const bfloat16x8_t va4 = vld1q_bf16(a4); a4 = (const bfloat16_t*) ((uintptr_t) a4 + k);

      const bfloat16x8_t vb0 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb1 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb2 = vld1q_bf16(w); w += 8;
      const bfloat16x8_t vb3 = vld1q_bf16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vreinterpretq_u16_bf16(vb0), vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vreinterpretq_u16_bf16(vb1), vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vreinterpretq_u16_bf16(vb2), vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vreinterpretq_u16_bf16(vb3), vmovq_n_u16(0));

      const bfloat16x8_t va0x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm0));
      vacc0x0 = vbfmlalbq_f32(vacc0x0, va0x0, vb0);
      vacc0x0 = vbfmlaltq_f32(vacc0x0, va0x0, vb0);
      const bfloat16x8_t va1x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm0));
      vacc1x0 = vbfmlalbq_f32(vacc1x0, va1x0, vb0);
      vacc1x0 = vbfmlaltq_f32(vacc1x0, va1x0, vb0);
      const bfloat16x8_t va2x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm0));
      vacc2x0 = vbfmlalbq_f32(vacc2x0, va2x0, vb0);
      vacc2x0 = vbfmlaltq_f32(vacc2x0, va2x0, vb0);
      const bfloat16x8_t va3x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm0));
      vacc3x0 = vbfmlalbq_f32(vacc3x0, va3x0, vb0);
      vacc3x0 = vbfmlaltq_f32(vacc3x0, va3x0, vb0);
      const bfloat16x8_t va4x0 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm0));
      vacc4x0 = vbfmlalbq_f32(vacc4x0, va4x0, vb0);
      vacc4x0 = vbfmlaltq_f32(vacc4x0, va4x0, vb0);
      const bfloat16x8_t va0x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm1));
      vacc0x1 = vbfmlalbq_f32(vacc0x1, va0x1, vb1);
      vacc0x1 = vbfmlaltq_f32(vacc0x1, va0x1, vb1);
      const bfloat16x8_t va1x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm1));
      vacc1x1 = vbfmlalbq_f32(vacc1x1, va1x1, vb1);
      vacc1x1 = vbfmlaltq_f32(vacc1x1, va1x1, vb1);
      const bfloat16x8_t va2x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm1));
      vacc2x1 = vbfmlalbq_f32(vacc2x1, va2x1, vb1);
      vacc2x1 = vbfmlaltq_f32(vacc2x1, va2x1, vb1);
      const bfloat16x8_t va3x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm1));
      vacc3x1 = vbfmlalbq_f32(vacc3x1, va3x1, vb1);
      vacc3x1 = vbfmlaltq_f32(vacc3x1, va3x1, vb1);
      const bfloat16x8_t va4x1 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm1));
      vacc4x1 = vbfmlalbq_f32(vacc4x1, va4x1, vb1);
      vacc4x1 = vbfmlaltq_f32(vacc4x1, va4x1, vb1);
      const bfloat16x8_t va0x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm2));
      vacc0x2 = vbfmlalbq_f32(vacc0x2, va0x2, vb2);
      vacc0x2 = vbfmlaltq_f32(vacc0x2, va0x2, vb2);
      const bfloat16x8_t va1x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm2));
      vacc1x2 = vbfmlalbq_f32(vacc1x2, va1x2, vb2);
      vacc1x2 = vbfmlaltq_f32(vacc1x2, va1x2, vb2);
      const bfloat16x8_t va2x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm2));
      vacc2x2 = vbfmlalbq_f32(vacc2x2, va2x2, vb2);
      vacc2x2 = vbfmlaltq_f32(vacc2x2, va2x2, vb2);
      const bfloat16x8_t va3x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm2));
      vacc3x2 = vbfmlalbq_f32(vacc3x2, va3x2, vb2);
      vacc3x2 = vbfmlaltq_f32(vacc3x2, va3x2, vb2);
      const bfloat16x8_t va4x2 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm2));
      vacc4x2 = vbfmlalbq_f32(vacc4x2, va4x2, vb2);
      vacc4x2 = vbfmlaltq_f32(vacc4x2, va4x2, vb2);
      const bfloat16x8_t va0x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va0), vm3));
      vacc0x3 = vbfmlalbq_f32(vacc0x3, va0x3, vb3);
      vacc0x3 = vbfmlaltq_f32(vacc0x3, va0x3, vb3);
      const bfloat16x8_t va1x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va1), vm3));
      vacc1x3 = vbfmlalbq_f32(vacc1x3, va1x3, vb3);
      vacc1x3 = vbfmlaltq_f32(vacc1x3, va1x3, vb3);
      const bfloat16x8_t va2x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va2), vm3));
      vacc2x3 = vbfmlalbq_f32(vacc2x3, va2x3, vb3);
      vacc2x3 = vbfmlaltq_f32(vacc2x3, va2x3, vb3);
      const bfloat16x8_t va3x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va3), vm3));
      vacc3x3 = vbfmlalbq_f32(vacc3x3, va3x3, vb3);
      vacc3x3 = vbfmlaltq_f32(vacc3x3, va3x3, vb3);
      const bfloat16x8_t va4x3 = vreinterpretq_bf16_u16(vbicq_u16(vreinterpretq_u16_bf16(va4), vm3));
      vacc4x3 = vbfmlalbq_f32(vacc4x3, va4x3, vb3);
      vacc4x3 = vbfmlaltq_f32(vacc4x3, va4x3, vb3);
    }

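    // Each vaccMxN vector holds 4 partial sums for output element (row M,
    // column N); reduce them horizontally into one 4-column vector per row.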
    #if XNN_ARCH_ARM64
      const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
      const float32x4_t vacc1x01 = vpaddq_f32(vacc1x0, vacc1x1);
      const float32x4_t vacc2x01 = vpaddq_f32(vacc2x0, vacc2x1);
      const float32x4_t vacc3x01 = vpaddq_f32(vacc3x0, vacc3x1);
      const float32x4_t vacc4x01 = vpaddq_f32(vacc4x0, vacc4x1);
      const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);
      const float32x4_t vacc1x23 = vpaddq_f32(vacc1x2, vacc1x3);
      const float32x4_t vacc2x23 = vpaddq_f32(vacc2x2, vacc2x3);
      const float32x4_t vacc3x23 = vpaddq_f32(vacc3x2, vacc3x3);
      const float32x4_t vacc4x23 = vpaddq_f32(vacc4x2, vacc4x3);

      float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
      float32x4_t vacc1x0123 = vpaddq_f32(vacc1x01, vacc1x23);
      float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23);
      float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);
      float32x4_t vacc4x0123 = vpaddq_f32(vacc4x01, vacc4x23);
    #else
      const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
      const float32x2_t vsum1x0 = vadd_f32(vget_low_f32(vacc1x0), vget_high_f32(vacc1x0));
      const float32x2_t vsum2x0 = vadd_f32(vget_low_f32(vacc2x0), vget_high_f32(vacc2x0));
      const float32x2_t vsum3x0 = vadd_f32(vget_low_f32(vacc3x0), vget_high_f32(vacc3x0));
      const float32x2_t vsum4x0 = vadd_f32(vget_low_f32(vacc4x0), vget_high_f32(vacc4x0));
      const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
      const float32x2_t vsum1x1 = vadd_f32(vget_low_f32(vacc1x1), vget_high_f32(vacc1x1));
      const float32x2_t vsum2x1 = vadd_f32(vget_low_f32(vacc2x1), vget_high_f32(vacc2x1));
      const float32x2_t vsum3x1 = vadd_f32(vget_low_f32(vacc3x1), vget_high_f32(vacc3x1));
      const float32x2_t vsum4x1 = vadd_f32(vget_low_f32(vacc4x1), vget_high_f32(vacc4x1));
      const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
      const float32x2_t vsum1x2 = vadd_f32(vget_low_f32(vacc1x2), vget_high_f32(vacc1x2));
      const float32x2_t vsum2x2 = vadd_f32(vget_low_f32(vacc2x2), vget_high_f32(vacc2x2));
      const float32x2_t vsum3x2 = vadd_f32(vget_low_f32(vacc3x2), vget_high_f32(vacc3x2));
      const float32x2_t vsum4x2 = vadd_f32(vget_low_f32(vacc4x2), vget_high_f32(vacc4x2));
      const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));
      const float32x2_t vsum1x3 = vadd_f32(vget_low_f32(vacc1x3), vget_high_f32(vacc1x3));
      const float32x2_t vsum2x3 = vadd_f32(vget_low_f32(vacc2x3), vget_high_f32(vacc2x3));
      const float32x2_t vsum3x3 = vadd_f32(vget_low_f32(vacc3x3), vget_high_f32(vacc3x3));
      const float32x2_t vsum4x3 = vadd_f32(vget_low_f32(vacc4x3), vget_high_f32(vacc4x3));

      float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
      float32x4_t vacc1x0123 = vcombine_f32(vpadd_f32(vsum1x0, vsum1x1), vpadd_f32(vsum1x2, vsum1x3));
      float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3));
      float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));
      float32x4_t vacc4x0123 = vcombine_f32(vpadd_f32(vsum4x0, vsum4x1), vpadd_f32(vsum4x2, vsum4x3));
    #endif

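    // Clamp the results to the [min, max] range from params.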
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);

    bfloat16x4_t vout0x0123 = vcvt_bf16_f32(vacc0x0123);
    bfloat16x4_t vout1x0123 = vcvt_bf16_f32(vacc1x0123);
    bfloat16x4_t vout2x0123 = vcvt_bf16_f32(vacc2x0123);
    bfloat16x4_t vout3x0123 = vcvt_bf16_f32(vacc3x0123);
    bfloat16x4_t vout4x0123 = vcvt_bf16_f32(vacc4x0123);

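    // Store the outputs: a full 4-element row per output pointer in the common
    // case, otherwise 2- and 1-element tail stores.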
    if XNN_LIKELY(nc >= 4) {
      vst1_bf16(c0, vout0x0123);
      c0 = (bfloat16_t*) ((uintptr_t) c0 + cn_stride);
      vst1_bf16(c1, vout1x0123);
      c1 = (bfloat16_t*) ((uintptr_t) c1 + cn_stride);
      vst1_bf16(c2, vout2x0123);
      c2 = (bfloat16_t*) ((uintptr_t) c2 + cn_stride);
      vst1_bf16(c3, vout3x0123);
      c3 = (bfloat16_t*) ((uintptr_t) c3 + cn_stride);
      vst1_bf16(c4, vout4x0123);
      c4 = (bfloat16_t*) ((uintptr_t) c4 + cn_stride);

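      // Rewind the A pointers by kc bytes so the same rows feed the next
      // column block.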
      a0 = (const bfloat16_t*) ((uintptr_t) a0 - kc);
      a1 = (const bfloat16_t*) ((uintptr_t) a1 - kc);
      a2 = (const bfloat16_t*) ((uintptr_t) a2 - kc);
      a3 = (const bfloat16_t*) ((uintptr_t) a3 - kc);
      a4 = (const bfloat16_t*) ((uintptr_t) a4 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_bf16(vout0x0123), 0); c0 += 2;
        vst1_lane_u32((void*) c1, vreinterpret_u32_bf16(vout1x0123), 0); c1 += 2;
        vst1_lane_u32((void*) c2, vreinterpret_u32_bf16(vout2x0123), 0); c2 += 2;
        vst1_lane_u32((void*) c3, vreinterpret_u32_bf16(vout3x0123), 0); c3 += 2;
        vst1_lane_u32((void*) c4, vreinterpret_u32_bf16(vout4x0123), 0); c4 += 2;

        vout0x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout0x0123), vreinterpret_u16_bf16(vout0x0123), 2));
        vout1x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout1x0123), vreinterpret_u16_bf16(vout1x0123), 2));
        vout2x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout2x0123), vreinterpret_u16_bf16(vout2x0123), 2));
        vout3x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout3x0123), vreinterpret_u16_bf16(vout3x0123), 2));
        vout4x0123 = vreinterpret_bf16_u16(vext_u16(vreinterpret_u16_bf16(vout4x0123), vreinterpret_u16_bf16(vout4x0123), 2));
      }
      if (nc & 1) {
        vst1_lane_bf16(c0, vout0x0123, 0);
        vst1_lane_bf16(c1, vout1x0123, 0);
        vst1_lane_bf16(c2, vout2x0123, 0);
        vst1_lane_bf16(c3, vout3x0123, 0);
        vst1_lane_bf16(c4, vout4x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}