// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_velu_ukernel__scalar_rr2_p6_x6(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

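  // Load the micro-kernel parameters: the input prescale, the ELU alpha/beta
  // coefficients, and the constants of the exp(z) approximation (magic bias,
  // log2(e), saturation cutoff, split -ln(2), and the degree-6 polynomial
  // coefficients c2..c6).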
  const float vprescale = params->scalar_rr2_p6.prescale;
  const float valpha = params->scalar_rr2_p6.alpha;
  const float vbeta = params->scalar_rr2_p6.beta;
  const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
  const float vlog2e = params->scalar_rr2_p6.log2e;
  const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
  const float vc6 = params->scalar_rr2_p6.c6;
  const float vc5 = params->scalar_rr2_p6.c5;
  const float vc4 = params->scalar_rr2_p6.c4;
  const float vc3 = params->scalar_rr2_p6.c3;
  const float vc2 = params->scalar_rr2_p6.c2;
  const float vone = params->scalar_rr2_p6.one;

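  // Main loop: process 6 elements per iteration.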
  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    float vx5 = x[5];
    x += 6;

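    // Pre-scale the inputs: the exponential is evaluated on z = x * prescale.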
    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;
    const float vz5 = vx5 * vprescale;

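    // Compute n := round(z / ln(2)) by multiplying by log2(e) and adding a
    // large "magic bias", which leaves the rounded integer in the low
    // mantissa bits of vn.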
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;
    float vn5 = vz5 * vlog2e + vmagic_bias;

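    // Reconstruct s := 2**n by shifting n's low bits into the floating-point
    // exponent field, then subtract the magic bias to recover n as a float.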
    float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    vn1 -= vmagic_bias;
    float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
    vn2 -= vmagic_bias;
    float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
    vn3 -= vmagic_bias;
    float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
    vn4 -= vmagic_bias;
    float vs5 = uint32_as_float(float_as_uint32(vn5) << 23);
    vn5 -= vmagic_bias;

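    // Compute the reduced argument t := z - n * ln(2) with Cody-Waite range
    // reduction: -ln(2) is split into a "high" and a "low" part so the
    // subtraction stays accurate.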
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vt5 = vn5 * vminus_ln2_hi + vz5;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    vt4 = vn4 * vminus_ln2_lo + vt4;
    vt5 = vn5 * vminus_ln2_lo + vt5;

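    // Below the saturation cutoff, exp(z) - 1 == -1 to within single
    // precision: zero out s and t so the result flushes to -alpha.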
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz5 <= vsat_cutoff) {
      vs5 = 0.0f;
      vt5 = 0.0f;
    }

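    // Evaluate the polynomial tail p := c2*t + c3*t^2 + c4*t^3 + c5*t^4 + c6*t^5
    // by Horner's scheme, so that exp(t) - 1 is approximated by t + t*p.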
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;
    float vp2 = vc6 * vt2 + vc5;
    float vp3 = vc6 * vt3 + vc5;
    float vp4 = vc6 * vt4 + vc5;
    float vp5 = vc6 * vt5 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;
    vp2 = vp2 * vt2 + vc4;
    vp3 = vp3 * vt3 + vc4;
    vp4 = vp4 * vt4 + vc4;
    vp5 = vp5 * vt5 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;
    vp4 = vp4 * vt4 + vc3;
    vp5 = vp5 * vt5 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;
    vp4 = vp4 * vt4 + vc2;
    vp5 = vp5 * vt5 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;
    vp5 *= vt5;

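    // Reconstruct exp(z) - 1 = s * (t + t*p) + (s - 1): fold s into t, and
    // subtract one from s.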
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;
    vt5 *= vs5;
    vs5 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;
    vp5 = vp5 * vt5 + vt5;

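    // Compute both candidate results: e := alpha * (exp(z) - 1) for the
    // negative branch and y := beta * x for the non-negative branch.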
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;
    const float ve5 = (vp5 + vs5) * valpha;
    float vy5 = vx5 * vbeta;

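    // Select the ELU branch by the sign of the original input.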
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }
    if XNN_UNPREDICTABLE(vx5 < 0.0f) {
      vy5 = ve5;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y[5] = vy5;
    y += 6;
  }
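  // Remainder loop: process the final 1-5 elements one at a time, using the
  // same computation as above.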
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      float vs = uint32_as_float(float_as_uint32(vn) << 23);
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      vt = vn * vminus_ln2_lo + vt;

      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc6 * vt + vc5;
      vp = vp * vt + vc4;
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}