// Auto-generated file. Do not edit!
//   Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>


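// Computes f[i] = exp(x[i] - max) for a batch of fp16 inputs, stores each
// f[i] to `output`, and writes the scalar sum of all f[i] to `sum` (the
// exp-and-reduce step of a numerically stable softmax). In the kernel name,
// "rr2" is range reduction against a two-word (hi/lo) split of ln2, "p2" is
// the degree-2 polynomial approximation, and "x80" means the main loop
// consumes 80 elements (ten 8-lane NEON fp16 vectors) per iteration.
// XNN_OOB_READS indicates the remainder path may read, but never writes,
// past the end of the input buffer.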
void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x80(
    size_t batch,
    const void* input,
    const void* max,
    void* output,
    void* sum,
    const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch % sizeof(__fp16) == 0);

  const float16x8_t vi_max = vld1q_dup_f16(max);
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
  float16x8_t vacc0 = vmovq_n_f16(0.0f);
  for (; batch >= 80 * sizeof(__fp16); batch -= 80 * sizeof(__fp16)) {
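    // Load ten vectors (80 fp16 elements) of input.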
    const float16x8_t vi0 = vld1q_f16(i); i += 8;
    const float16x8_t vi1 = vld1q_f16(i); i += 8;
    const float16x8_t vi2 = vld1q_f16(i); i += 8;
    const float16x8_t vi3 = vld1q_f16(i); i += 8;
    const float16x8_t vi4 = vld1q_f16(i); i += 8;
    const float16x8_t vi5 = vld1q_f16(i); i += 8;
    const float16x8_t vi6 = vld1q_f16(i); i += 8;
    const float16x8_t vi7 = vld1q_f16(i); i += 8;
    const float16x8_t vi8 = vld1q_f16(i); i += 8;
    const float16x8_t vi9 = vld1q_f16(i); i += 8;

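    // Subtract the pre-computed maximum so every argument to exp() is <= 0,
    // preventing overflow.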
    const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
    const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
    const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
    const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
    const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
    const float16x8_t vx5 = vsubq_f16(vi5, vi_max);
    const float16x8_t vx6 = vsubq_f16(vi6, vi_max);
    const float16x8_t vx7 = vsubq_f16(vi7, vi_max);
    const float16x8_t vx8 = vsubq_f16(vi8, vi_max);
    const float16x8_t vx9 = vsubq_f16(vi9, vi_max);

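    // n := round(x * log2(e)): adding the magic bias rounds the product and
    // leaves the resulting integer in the low bits of the fp16 representation.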
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vx6, vlog2e);
    float16x8_t vn7 = vfmaq_f16(vmagic_bias, vx7, vlog2e);
    float16x8_t vn8 = vfmaq_f16(vmagic_bias, vx8, vlog2e);
    float16x8_t vn9 = vfmaq_f16(vmagic_bias, vx9, vlog2e);

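    // s := 2^n, built by shifting the integer bits of n left by 10 into the
    // fp16 exponent field.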
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));
    const float16x8_t vs7 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn7), 10));
    const float16x8_t vs8 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn8), 10));
    const float16x8_t vs9 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn9), 10));

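    // Subtract the magic bias to recover n as a regular float16 value.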
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);
    vn2 = vsubq_f16(vn2, vmagic_bias);
    vn3 = vsubq_f16(vn3, vmagic_bias);
    vn4 = vsubq_f16(vn4, vmagic_bias);
    vn5 = vsubq_f16(vn5, vmagic_bias);
    vn6 = vsubq_f16(vn6, vmagic_bias);
    vn7 = vsubq_f16(vn7, vmagic_bias);
    vn8 = vsubq_f16(vn8, vmagic_bias);
    vn9 = vsubq_f16(vn9, vmagic_bias);

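    // t := x - n * ln2, with ln2 split into hi and lo halves (Cody-Waite
    // style) so the reduction loses less precision than a single fma would.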
    float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
    float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
    float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
    float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
    float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
    float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);
    float16x8_t vt6 = vfmaq_f16(vx6, vn6, vminus_ln2_hi);
    float16x8_t vt7 = vfmaq_f16(vx7, vn7, vminus_ln2_hi);
    float16x8_t vt8 = vfmaq_f16(vx8, vn8, vminus_ln2_hi);
    float16x8_t vt9 = vfmaq_f16(vx9, vn9, vminus_ln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
    vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
    vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
    vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
    vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);
    vt6 = vfmaq_f16(vt6, vn6, vminus_ln2_lo);
    vt7 = vfmaq_f16(vt7, vn7, vminus_ln2_lo);
    vt8 = vfmaq_f16(vt8, vn8, vminus_ln2_lo);
    vt9 = vfmaq_f16(vt9, vn9, vminus_ln2_lo);

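    // Evaluate p := c1 + c2 * t; together with the reconstruction below this
    // forms the degree-2 approximation exp(t) ~= 1 + c1*t + c2*t^2.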
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
    const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
    const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
    const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
    const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
    const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);
    const float16x8_t vp7 = vfmaq_f16(vc1, vc2, vt7);
    const float16x8_t vp8 = vfmaq_f16(vc1, vc2, vt8);
    const float16x8_t vp9 = vfmaq_f16(vc1, vc2, vt9);

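    // Pre-scale t by s so the final fma computes f = s + (t*s)*p = s*(1 + t*p).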
    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);
    vt2 = vmulq_f16(vt2, vs2);
    vt3 = vmulq_f16(vt3, vs3);
    vt4 = vmulq_f16(vt4, vs4);
    vt5 = vmulq_f16(vt5, vs5);
    vt6 = vmulq_f16(vt6, vs6);
    vt7 = vmulq_f16(vt7, vs7);
    vt8 = vmulq_f16(vt8, vs8);
    vt9 = vmulq_f16(vt9, vs9);

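    // Reconstruct f := s + (t*s)*p ~= exp(x), and flag lanes where x is below
    // the cutoff at which exp(x) underflows fp16.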
    float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
    const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
    float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
    const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
    float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
    const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
    float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
    const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
    float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
    const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
    float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
    const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);
    float16x8_t vf6 = vfmaq_f16(vs6, vp6, vt6);
    const uint16x8_t vm6 = vcltq_f16(vx6, vdenorm_cutoff);
    float16x8_t vf7 = vfmaq_f16(vs7, vp7, vt7);
    const uint16x8_t vm7 = vcltq_f16(vx7, vdenorm_cutoff);
    float16x8_t vf8 = vfmaq_f16(vs8, vp8, vt8);
    const uint16x8_t vm8 = vcltq_f16(vx8, vdenorm_cutoff);
    float16x8_t vf9 = vfmaq_f16(vs9, vp9, vt9);
    const uint16x8_t vm9 = vcltq_f16(vx9, vdenorm_cutoff);

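    // Zero the flagged lanes: for x < denorm_cutoff the result is flushed to 0.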
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
    vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
    vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
    vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
    vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));
    vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vm6));
    vf7 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf7), vm7));
    vf8 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf8), vm8));
    vf9 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf9), vm9));

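    // Store the ten result vectors.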
    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
    vst1q_f16(o, vf2); o += 8;
    vst1q_f16(o, vf3); o += 8;
    vst1q_f16(o, vf4); o += 8;
    vst1q_f16(o, vf5); o += 8;
    vst1q_f16(o, vf6); o += 8;
    vst1q_f16(o, vf7); o += 8;
    vst1q_f16(o, vf8); o += 8;
    vst1q_f16(o, vf9); o += 8;

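    // Accumulate all ten result vectors into the running sum.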
    vacc0 = vaddq_f16(vacc0, vf0);
    vacc0 = vaddq_f16(vacc0, vf1);
    vacc0 = vaddq_f16(vacc0, vf2);
    vacc0 = vaddq_f16(vacc0, vf3);
    vacc0 = vaddq_f16(vacc0, vf4);
    vacc0 = vaddq_f16(vacc0, vf5);
    vacc0 = vaddq_f16(vacc0, vf6);
    vacc0 = vaddq_f16(vacc0, vf7);
    vacc0 = vaddq_f16(vacc0, vf8);
    vacc0 = vaddq_f16(vacc0, vf9);
  }

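  // Process the remaining full 8-element vectors with the same algorithm.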
  float16x8_t vacc = vacc0;
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vi = vld1q_f16(i); i += 8;

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

    vst1q_f16(o, vf); o += 8;

    vacc = vaddq_f16(vacc, vf);
  }
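  // Fold the 8-lane accumulator to 4 lanes before handling the 1-7 element tail.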
  float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
  if (batch != 0) {
    assert(batch >= 1 * sizeof(__fp16));
    assert(batch <= 7 * sizeof(__fp16));
    const float16x8_t vi = vld1q_f16(i);

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

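    // Store the valid lanes. Before accumulating, shift the 64-bit view of
    // vf_lo left so the lanes that were not stored are discarded and only the
    // stored lanes contribute to the sum.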
    float16x4_t vf_lo = vget_low_f16(vf);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vacc_lo = vadd_f16(vacc_lo, vf_lo);
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
    }
  }
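  // Reduce the 4 accumulator lanes to the final scalar sum.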
  vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
  *((__fp16*) sum) = vget_lane_f16(vacc_lo, 0) + vget_lane_f16(vacc_lo, 1);
}