// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

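// Lookup table with the 16 values 2**(-k/16), k = 0..15, stored as IEEE-754
// single-precision bit patterns.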
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

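  // Load the ELU parameters (prescale, alpha, beta) and the constants of the
  // exp approximation: magic rounding bias, log2(e) scale, saturation cutoff,
  // ln(2) split into high/low parts, and the polynomial coefficients c3, c2.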
  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

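  // Main loop: process 5 elements per iteration (the "x5" variant of this kernel).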
  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    x += 5;

    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;

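    // Magic-bias rounding: after adding vmagic_bias, the low mantissa bits of vn
    // hold round(vz * vlog2e) as an integer. For this 16-entry LUT variant, vlog2e
    // is presumably 16/ln(2), so the integer counts multiples of ln(2)/16.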
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;

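    // Split the rounded integer: the low 4 bits (vindex_mask = 0xF) index the table,
    // and the bits above them, shifted into the exponent field (23 mantissa bits -
    // 4 index bits = 19), supply the power-of-two scale. Subtracting vmagic_bias
    // recovers the rounded multiple as a float for the range reduction below.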
    const uint32_t ven0 = float_as_uint32(vn0) << 19;
    const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = float_as_uint32(vn1) << 19;
    const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = float_as_uint32(vn2) << 19;
    const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = float_as_uint32(vn3) << 19;
    const uint32_t vidx3 = float_as_uint32(vn3) & vindex_mask;
    vn3 -= vmagic_bias;
    const uint32_t ven4 = float_as_uint32(vn4) << 19;
    const uint32_t vidx4 = float_as_uint32(vn4) & vindex_mask;
    vn4 -= vmagic_bias;

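    // First range-reduction step: vt := vz - vn * ln2_hi (using the negated high
    // part of ln(2), pre-scaled for the table), and reconstruct vs ~ 2**(n/16) by
    // adding the exponent bits to the table entry's bit pattern.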
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vs4 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx4] + ven4);

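    // Second range-reduction step folds in the low part of ln(2) for extra accuracy.
    // At or below the saturation cutoff, exp(vz) - 1 rounds to -1, so vs and vt are
    // zeroed; the arithmetic below then yields exactly -1 before the alpha scaling.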
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    vt3 = vn3 * vminus_ln2_lo + vt3;
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    vt4 = vn4 * vminus_ln2_lo + vt4;
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }

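    // Degree-3 polynomial: the steps below build vp = vs * (vt + c2*vt^2 + c3*vt^3),
    // so that vp + (vs - 1) approximates exp(vz) - 1.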
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;
    float vp4 = vc3 * vt4 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;

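    // Scale vt by vs so the upcoming vp*vt + vt step carries the vs factor;
    // vs - 1 prepares the exp(vz) - 1 reconstruction.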
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;

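    // Final polynomial step: vp := vp*vt + vt = vs * (vt + c2*vt^2 + c3*vt^3).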
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;

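    // Compute both ELU branches: ve := alpha * (vp + vs - 1) ~ alpha * (exp(vz) - 1)
    // for negative inputs, vy := beta * vx for non-negative inputs.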
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;

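    // Select per the sign of the input; XNN_UNPREDICTABLE hints that these
    // data-dependent branches are unlikely to be predicted well.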
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y += 5;
  }
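  // Remainder loop: process the last 1-4 elements one at a time with the same algorithm.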
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = float_as_uint32(vn) << 19;
      const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}