// Auto-generated file. Do not edit!
//   Template: src/f16-vsigmoid/neonfp16arith.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_nr1recps_x56(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(batch % sizeof(__fp16) == 0);

  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_log2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_log2e));
  const float16x8_t vln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_hi));
  const float16x8_t vln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vone = vmovq_n_f16(1.0f);
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
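
  // General structure of the algorithm:
  //
  //           / exp(x) / (1 + exp(x)) if x <= 0
  //   f(x) :=
  //           \ 1 - f(-x) if x >= 0
  //
  // First compute f(-z) := exp(-z) / (1 + exp(-z)) with z := abs(x), then
  // select 1 - f(-z) where x >= 0. The main loop below processes 56 halfwords
  // (7 vectors of 8) per iteration.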
  for (; batch >= 56 * sizeof(__fp16); batch -= 56 * sizeof(__fp16)) {
    const float16x8_t vx0 = vld1q_f16(i); i += 8;
    const float16x8_t vx1 = vld1q_f16(i); i += 8;
    const float16x8_t vx2 = vld1q_f16(i); i += 8;
    const float16x8_t vx3 = vld1q_f16(i); i += 8;
    const float16x8_t vx4 = vld1q_f16(i); i += 8;
    const float16x8_t vx5 = vld1q_f16(i); i += 8;
    const float16x8_t vx6 = vld1q_f16(i); i += 8;

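    // z := abs(x), so the sigmoid is evaluated at a non-positive argument.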
    const float16x8_t vz0 = vabsq_f16(vx0);
    const float16x8_t vz1 = vabsq_f16(vx1);
    const float16x8_t vz2 = vabsq_f16(vx2);
    const float16x8_t vz3 = vabsq_f16(vx3);
    const float16x8_t vz4 = vabsq_f16(vx4);
    const float16x8_t vz5 = vabsq_f16(vx5);
    const float16x8_t vz6 = vabsq_f16(vx6);

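    // Compute reduced argument n := round(-z / log(2)) with the magic-bias
    // trick: adding the large bias to z * (-log2(e)) rounds the product to an
    // integer held in the low mantissa bits of vn.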
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vz0, vminus_log2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vz1, vminus_log2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vz2, vminus_log2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vz3, vminus_log2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vz4, vminus_log2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vz5, vminus_log2e);
    float16x8_t vn6 = vfmaq_f16(vmagic_bias, vz6, vminus_log2e);

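    // Build the scale s := 2**n by shifting the integer bits of vn into the
    // exponent field (IEEE half precision has 10 mantissa bits).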
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));
    const float16x8_t vs6 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn6), 10));

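    // Subtract the magic bias back to recover n as a floating-point value.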
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);
    vn2 = vsubq_f16(vn2, vmagic_bias);
    vn3 = vsubq_f16(vn3, vmagic_bias);
    vn4 = vsubq_f16(vn4, vmagic_bias);
    vn5 = vsubq_f16(vn5, vmagic_bias);
    vn6 = vsubq_f16(vn6, vmagic_bias);

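    // Compute reduced argument t := z + n * log(2), with log(2) split into
    // high and low parts (Cody-Waite range reduction, the "rr2" in the kernel
    // name) for extra accuracy in half precision.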
    float16x8_t vt0 = vfmaq_f16(vz0, vn0, vln2_hi);
    float16x8_t vt1 = vfmaq_f16(vz1, vn1, vln2_hi);
    float16x8_t vt2 = vfmaq_f16(vz2, vn2, vln2_hi);
    float16x8_t vt3 = vfmaq_f16(vz3, vn3, vln2_hi);
    float16x8_t vt4 = vfmaq_f16(vz4, vn4, vln2_hi);
    float16x8_t vt5 = vfmaq_f16(vz5, vn5, vln2_hi);
    float16x8_t vt6 = vfmaq_f16(vz6, vn6, vln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vln2_lo);
    vt2 = vfmaq_f16(vt2, vn2, vln2_lo);
    vt3 = vfmaq_f16(vt3, vn3, vln2_lo);
    vt4 = vfmaq_f16(vt4, vn4, vln2_lo);
    vt5 = vfmaq_f16(vt5, vn5, vln2_lo);
    vt6 = vfmaq_f16(vt6, vn6, vln2_lo);

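    // Degree-2 polynomial ("p2") approximation of exp(-t) on the reduced
    // range: exp(-t) ~ 1 + t * (c1 + c2 * t).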
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
    const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
    const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
    const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
    const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);
    const float16x8_t vp6 = vfmaq_f16(vc1, vc2, vt6);

    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);
    vt2 = vmulq_f16(vt2, vs2);
    vt3 = vmulq_f16(vt3, vs3);
    vt4 = vmulq_f16(vt4, vs4);
    vt5 = vmulq_f16(vt5, vs5);
    vt6 = vmulq_f16(vt6, vs6);

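    // Reconstruct e := exp(-z) = s * (1 + t * (c1 + c2 * t)).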
    const float16x8_t ve0 = vfmaq_f16(vs0, vp0, vt0);
    const float16x8_t ve1 = vfmaq_f16(vs1, vp1, vt1);
    const float16x8_t ve2 = vfmaq_f16(vs2, vp2, vt2);
    const float16x8_t ve3 = vfmaq_f16(vs3, vp3, vt3);
    const float16x8_t ve4 = vfmaq_f16(vs4, vp4, vt4);
    const float16x8_t ve5 = vfmaq_f16(vs5, vp5, vt5);
    const float16x8_t ve6 = vfmaq_f16(vs6, vp6, vt6);

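    // Denominator of the sigmoid fraction: d := 1 + exp(-z).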
    const float16x8_t vd0 = vaddq_f16(ve0, vone);
    const float16x8_t vd1 = vaddq_f16(ve1, vone);
    const float16x8_t vd2 = vaddq_f16(ve2, vone);
    const float16x8_t vd3 = vaddq_f16(ve3, vone);
    const float16x8_t vd4 = vaddq_f16(ve4, vone);
    const float16x8_t vd5 = vaddq_f16(ve5, vone);
    const float16x8_t vd6 = vaddq_f16(ve6, vone);

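    // Approximate r := 1 / d with a reciprocal estimate (VRECPE) refined by
    // one Newton-Raphson step ("nr1recps"): VRECPS computes the correction
    // factor 2 - r * d.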
    float16x8_t vr0 = vrecpeq_f16(vd0);
    float16x8_t vr1 = vrecpeq_f16(vd1);
    float16x8_t vr2 = vrecpeq_f16(vd2);
    float16x8_t vr3 = vrecpeq_f16(vd3);
    float16x8_t vr4 = vrecpeq_f16(vd4);
    float16x8_t vr5 = vrecpeq_f16(vd5);
    float16x8_t vr6 = vrecpeq_f16(vd6);

    const float16x8_t vadj0 = vrecpsq_f16(vr0, vd0);
    const float16x8_t vadj1 = vrecpsq_f16(vr1, vd1);
    const float16x8_t vadj2 = vrecpsq_f16(vr2, vd2);
    const float16x8_t vadj3 = vrecpsq_f16(vr3, vd3);
    const float16x8_t vadj4 = vrecpsq_f16(vr4, vd4);
    const float16x8_t vadj5 = vrecpsq_f16(vr5, vd5);
    const float16x8_t vadj6 = vrecpsq_f16(vr6, vd6);

    vr0 = vmulq_f16(vr0, vadj0);
    vr1 = vmulq_f16(vr1, vadj1);
    vr2 = vmulq_f16(vr2, vadj2);
    vr3 = vmulq_f16(vr3, vadj3);
    vr4 = vmulq_f16(vr4, vadj4);
    vr5 = vmulq_f16(vr5, vadj5);
    vr6 = vmulq_f16(vr6, vadj6);

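    // f := e / d = exp(-z) / (1 + exp(-z)) = sigmoid(-z).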
    float16x8_t vf0 = vmulq_f16(ve0, vr0);
    float16x8_t vf1 = vmulq_f16(ve1, vr1);
    float16x8_t vf2 = vmulq_f16(ve2, vr2);
    float16x8_t vf3 = vmulq_f16(ve3, vr3);
    float16x8_t vf4 = vmulq_f16(ve4, vr4);
    float16x8_t vf5 = vmulq_f16(ve5, vr5);
    float16x8_t vf6 = vmulq_f16(ve6, vr6);

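    // Force the result to +0 where |x| exceeds the cutoff beyond which
    // sigmoid(-z) underflows to a denormal. NaN inputs fail the comparison
    // and pass through unchanged.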
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vcagtq_f16(vx0, vdenorm_cutoff)));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vcagtq_f16(vx1, vdenorm_cutoff)));
    vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vcagtq_f16(vx2, vdenorm_cutoff)));
    vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vcagtq_f16(vx3, vdenorm_cutoff)));
    vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vcagtq_f16(vx4, vdenorm_cutoff)));
    vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vcagtq_f16(vx5, vdenorm_cutoff)));
    vf6 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf6), vcagtq_f16(vx6, vdenorm_cutoff)));

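    // Select f where x < 0 and the reflection 1 - f elsewhere, since
    // sigmoid(x) = 1 - sigmoid(-x).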
    const uint16x8_t vm0 = vcltq_f16(vx0, vmovq_n_f16(0.0f));
    const uint16x8_t vm1 = vcltq_f16(vx1, vmovq_n_f16(0.0f));
    const uint16x8_t vm2 = vcltq_f16(vx2, vmovq_n_f16(0.0f));
    const uint16x8_t vm3 = vcltq_f16(vx3, vmovq_n_f16(0.0f));
    const uint16x8_t vm4 = vcltq_f16(vx4, vmovq_n_f16(0.0f));
    const uint16x8_t vm5 = vcltq_f16(vx5, vmovq_n_f16(0.0f));
    const uint16x8_t vm6 = vcltq_f16(vx6, vmovq_n_f16(0.0f));

    vf0 = vbslq_f16(vm0, vf0, vsubq_f16(vone, vf0));
    vf1 = vbslq_f16(vm1, vf1, vsubq_f16(vone, vf1));
    vf2 = vbslq_f16(vm2, vf2, vsubq_f16(vone, vf2));
    vf3 = vbslq_f16(vm3, vf3, vsubq_f16(vone, vf3));
    vf4 = vbslq_f16(vm4, vf4, vsubq_f16(vone, vf4));
    vf5 = vbslq_f16(vm5, vf5, vsubq_f16(vone, vf5));
    vf6 = vbslq_f16(vm6, vf6, vsubq_f16(vone, vf6));

    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
    vst1q_f16(o, vf2); o += 8;
    vst1q_f16(o, vf3); o += 8;
    vst1q_f16(o, vf4); o += 8;
    vst1q_f16(o, vf5); o += 8;
    vst1q_f16(o, vf6); o += 8;
  }
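  // Process remaining full vectors of 8 halfwords with the same computation.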
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vr = vrecpeq_f16(vd);
    const float16x8_t vadj = vrecpsq_f16(vr, vd);
    vr = vmulq_f16(vr, vadj);

    float16x8_t vf = vmulq_f16(ve, vr);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

    vst1q_f16(o, vf); o += 8;
  }
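  // Tail of 1-7 halfwords: XNN_OOB_READS permits loading a full vector past
  // the end of the input buffer; only the valid lanes are stored.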
  if XNN_UNLIKELY(batch != 0) {
    const float16x8_t vx = vld1q_f16(i);

    const float16x8_t vz = vabsq_f16(vx);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vz, vn, vln2_hi);
    vt = vfmaq_f16(vt, vn, vln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);
    const float16x8_t ve = vfmaq_f16(vs, vp, vt);
    const float16x8_t vd = vaddq_f16(ve, vone);

    float16x8_t vr = vrecpeq_f16(vd);
    const float16x8_t vadj = vrecpsq_f16(vr, vd);
    vr = vmulq_f16(vr, vadj);

    float16x8_t vf = vmulq_f16(ve, vr);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

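    // Store 4, 2, and/or 1 lanes according to the remaining batch size.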
    float16x4_t vf_lo = vget_low_f16(vf);
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 2;
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
    }
  }
}