xref: /aosp_15_r20/external/XNNPACK/src/f32-velu/gen/velu-wasm-rr2-p6-x6.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

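// WAsm scalar ELU microkernel, unrolled by 6 elements ("x6"). For each input it computes
//   y = beta * x                            if x >= 0
//   y = alpha * (exp(prescale * x) - 1)     if x <  0
// where exp() is approximated with a two-step Cody-Waite range reduction ("rr2")
// and a degree-6 polynomial ("p6").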
void xnn_f32_velu_ukernel__wasm_rr2_p6_x6(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_p6.prescale;
  const float valpha = params->scalar_rr2_p6.alpha;
  const float vbeta = params->scalar_rr2_p6.beta;
  const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
  const float vlog2e = params->scalar_rr2_p6.log2e;
  const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
  const float vc6 = params->scalar_rr2_p6.c6;
  const float vc5 = params->scalar_rr2_p6.c5;
  const float vc4 = params->scalar_rr2_p6.c4;
  const float vc3 = params->scalar_rr2_p6.c3;
  const float vc2 = params->scalar_rr2_p6.c2;
  const float vone = params->scalar_rr2_p6.one;

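  // Main loop: process 6 elements per iteration.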
  for (; n >= 6 * sizeof(float); n -= 6 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    float vx5 = x[5];
    x += 6;

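    // Pre-scale the inputs and clamp to [sat_cutoff, 0]: positive inputs take the
    // beta*x path below, and very negative inputs are saturated at the cutoff,
    // beyond which the result is approximately -alpha.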
    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);
    const float vz5 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx5 * vprescale, vsat_cutoff), 0.0f);

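    // n := round(z / ln(2)), computed as z * log2(e) plus a large "magic bias"
    // that shifts the rounded integer into the low mantissa bits.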
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;
    float vn5 = vz5 * vlog2e + vmagic_bias;

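    // s := 2**n, reconstructed by shifting the integer bits of vn into the
    // floating-point exponent field; subtracting the magic bias recovers n.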
    float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    vn1 -= vmagic_bias;
    float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
    vn2 -= vmagic_bias;
    float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
    vn3 -= vmagic_bias;
    float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
    vn4 -= vmagic_bias;
    float vs5 = uint32_as_float(float_as_uint32(vn5) << 23);
    vn5 -= vmagic_bias;

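    // t := z - n * ln(2), with ln(2) split into high and low parts (the "rr2"
    // two-step range reduction) for extra precision.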
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vt5 = vn5 * vminus_ln2_hi + vz5;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    vt4 = vn4 * vminus_ln2_lo + vt4;
    vt5 = vn5 * vminus_ln2_lo + vt5;

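    // Evaluate the polynomial approximation
    //   exp(t) - 1 ~= t + c2*t^2 + c3*t^3 + c4*t^4 + c5*t^5 + c6*t^6
    // with Horner's scheme; the leading t term is added back after scaling by s.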
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;
    float vp2 = vc6 * vt2 + vc5;
    float vp3 = vc6 * vt3 + vc5;
    float vp4 = vc6 * vt4 + vc5;
    float vp5 = vc6 * vt5 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;
    vp2 = vp2 * vt2 + vc4;
    vp3 = vp3 * vt3 + vc4;
    vp4 = vp4 * vt4 + vc4;
    vp5 = vp5 * vt5 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;
    vp4 = vp4 * vt4 + vc3;
    vp5 = vp5 * vt5 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;
    vp4 = vp4 * vt4 + vc2;
    vp5 = vp5 * vt5 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;
    vp5 *= vt5;

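    // Reconstruct exp(z) - 1 = (s - 1) + s*t*(p + 1): scale t by s, form s - 1,
    // then fold the leading t term back into the polynomial.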
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;
    vt5 *= vs5;
    vs5 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;
    vp5 = vp5 * vt5 + vt5;

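    // Negative branch: e = alpha * (exp(z) - 1); positive branch: y = beta * x.
    // The two branches are blended branch-free via max/min against zero.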
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);
    const float ve5 = (vp5 + vs5) * valpha;
    float vy5 = __builtin_wasm_max_f32(vx5 * vbeta, 0.0f);

    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);
    vy5 += __builtin_wasm_min_f32(ve5, 0.0f);

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y[5] = vy5;
    y += 6;
  }
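  // Remainder loop: handle the last 1-5 elements one at a time with the same
  // computation as the main loop.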
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);

      float vn = vz * vlog2e + vmagic_bias;
      float vs = uint32_as_float(float_as_uint32(vn) << 23);
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      vt = vn * vminus_ln2_lo + vt;


      float vp = vc6 * vt + vc5;
      vp = vp * vt + vc4;
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
      vy += __builtin_wasm_min_f32(ve, 0.0f);

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}