// Auto-generated file. Do not edit!
//   Template: src/bf16-gemm/c8-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>

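// bf16 GEMM microkernel with f32 accumulation: computes a single row of A
// (mr == 1) against up to four columns of packed weights, consuming 8
// K-elements per iteration ("1x4c8"). bf16 values are widened to f32 by
// zipping them into the upper 16 bits of 32-bit lanes, accumulated with NEON
// FMA, and the results are clamped to the min/max params before storing.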
void xnn_bf16_gemm_minmax_ukernel_1x4c8__neonfma_zip(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w_ptr,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_bf16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(uint16_t) == 0);
  assert(a != NULL);
  assert(w_ptr != NULL);
  assert(c != NULL);

  const uint16_t* a0 = (const uint16_t*) a;
  uint16_t* c0 = (uint16_t*) c;

  const uint16_t* w = (const uint16_t*) w_ptr;
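  // All-zero vector: zipping it with bf16 data places each bf16 value in the
  // high 16 bits of a 32-bit lane, which is exactly its f32 bit pattern.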
  const uint16x8_t vzero = vmovq_n_u16(0);
  do {
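    // Seed each column accumulator with its packed bf16 bias, widened to f32
    // (shifted into the top 16 bits) in lane 0; the other lanes start at zero.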
    float32x4_t vacc0x0 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x1 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x2 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;
    float32x4_t vacc0x3 = vreinterpretq_f32_u32(vshll_n_u16(vld1_lane_u16(w, vdup_n_u16(0), 0), 16)); w += 1;

    size_t k = kc;
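    // Main loop: multiply 8 K-elements of the A row against 4 packed columns
    // of B per iteration, accumulating in f32.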
    for (; k >= 8 * sizeof(uint16_t); k -= 8 * sizeof(uint16_t)) {
      const uint16x8_t va0 = vld1q_u16(a0); a0 += 8;

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

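      // Widen the lower four bf16 elements to f32: vzip1q interleaves the low
      // halves with zero, so each value lands in the top half of a 32-bit lane.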
      const float32x4_t va0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0));

      const float32x4_t vb0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb0));
      const float32x4_t vb1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb1));
      const float32x4_t vb2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb2));
      const float32x4_t vb3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0e, vb3e);

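      // Same for the upper four bf16 elements, via vzip2q on the high halves.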
      const float32x4_t va0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0));

      const float32x4_t vb0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb0));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb1));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb2));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0o, vb3o);
    }
    if XNN_UNLIKELY(k != 0) {
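      // Remainder: 1-7 K-elements left. This relies on the packed weights
      // being zero-padded past kc: activation lanes whose weight is zero are
      // masked off (vceqq + vbicq), so out-of-range lanes contribute exactly
      // 0 to the dot product even if the activation load read arbitrary bits.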
      const uint16x8_t va0 = vld1q_u16(a0); a0 = (const uint16_t*) ((uintptr_t) a0 + k);

      const uint16x8_t vb0 = vld1q_u16(w); w += 8;
      const uint16x8_t vb1 = vld1q_u16(w); w += 8;
      const uint16x8_t vb2 = vld1q_u16(w); w += 8;
      const uint16x8_t vb3 = vld1q_u16(w); w += 8;

      const uint16x8_t vm0 = vceqq_u16(vb0, vmovq_n_u16(0));
      const uint16x8_t vm1 = vceqq_u16(vb1, vmovq_n_u16(0));
      const uint16x8_t vm2 = vceqq_u16(vb2, vmovq_n_u16(0));
      const uint16x8_t vm3 = vceqq_u16(vb3, vmovq_n_u16(0));

      const float32x4_t vb0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb0));
      const float32x4_t vb1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb1));
      const float32x4_t vb2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb2));
      const float32x4_t vb3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, vb3));

      const uint16x8_t va0x0 = vbicq_u16(va0, vm0);
      const uint16x8_t va0x1 = vbicq_u16(va0, vm1);
      const uint16x8_t va0x2 = vbicq_u16(va0, vm2);
      const uint16x8_t va0x3 = vbicq_u16(va0, vm3);

      const float32x4_t va0x0e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x0));
      const float32x4_t va0x1e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x1));
      const float32x4_t va0x2e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x2));
      const float32x4_t va0x3e = vreinterpretq_f32_u16(vzip1q_u16(vzero, va0x3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0e, vb0e);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1e, vb1e);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2e, vb2e);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3e, vb3e);

      const float32x4_t vb0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb0));
      const float32x4_t vb1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb1));
      const float32x4_t vb2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb2));
      const float32x4_t vb3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, vb3));

      const float32x4_t va0x0o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x0));
      const float32x4_t va0x1o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x1));
      const float32x4_t va0x2o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x2));
      const float32x4_t va0x3o = vreinterpretq_f32_u16(vzip2q_u16(vzero, va0x3));

      vacc0x0 = vfmaq_f32(vacc0x0, va0x0o, vb0o);
      vacc0x1 = vfmaq_f32(vacc0x1, va0x1o, vb1o);
      vacc0x2 = vfmaq_f32(vacc0x2, va0x2o, vb2o);
      vacc0x3 = vfmaq_f32(vacc0x3, va0x3o, vb3o);
    }

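    // Each accumulator holds 4 partial sums for one output column; reduce
    // horizontally so vacc0x0123 packs the four column totals into one vector.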
#if XNN_ARCH_ARM64
    const float32x4_t vacc0x01 = vpaddq_f32(vacc0x0, vacc0x1);
    const float32x4_t vacc0x23 = vpaddq_f32(vacc0x2, vacc0x3);

    float32x4_t vacc0x0123 = vpaddq_f32(vacc0x01, vacc0x23);
#else
    const float32x2_t vsum0x0 = vadd_f32(vget_low_f32(vacc0x0), vget_high_f32(vacc0x0));
    const float32x2_t vsum0x1 = vadd_f32(vget_low_f32(vacc0x1), vget_high_f32(vacc0x1));
    const float32x2_t vsum0x2 = vadd_f32(vget_low_f32(vacc0x2), vget_high_f32(vacc0x2));
    const float32x2_t vsum0x3 = vadd_f32(vget_low_f32(vacc0x3), vget_high_f32(vacc0x3));

    float32x4_t vacc0x0123 = vcombine_f32(vpadd_f32(vsum0x0, vsum0x1), vpadd_f32(vsum0x2, vsum0x3));
#endif

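    // Clamp the four results to the [min, max] range from the params.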
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);

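    // Convert f32 back to bf16 by truncation: keep the top 16 bits of each lane.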
    uint16x4_t vout0x0123 = vshrn_n_u32(vreinterpretq_u32_f32(vacc0x0123), 16);

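    // Store a full 4-column tile and rewind a0 for the next tile; otherwise
    // write the 2-element and/or 1-element tail and finish.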
    if XNN_LIKELY(nc >= 4) {
      vst1_u16(c0, vout0x0123);
      c0 = (uint16_t*) ((uintptr_t) c0 + cn_stride);

      a0 = (const uint16_t*) ((uintptr_t) a0 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        vst1_lane_u32((void*) c0, vreinterpret_u32_u16(vout0x0123), 0); c0 += 2;

        vout0x0123 = vext_u16(vout0x0123, vout0x0123, 2);
      }
      if (nc & 1) {
        vst1_lane_u16(c0, vout0x0123, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}