// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


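// ELU nonlinearity with input/output scaling:
//   y := beta * x                         for x >= 0
//   y := alpha * (exp(prescale * x) - 1)  for x < 0
// The exponential is evaluated with a two-constant (hi/lo) Cody-Waite range
// reduction ("rr2") and a degree-6 polynomial approximation ("p6").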
void xnn_f32_velu_ukernel__wasm_rr2_p6_x5(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_p6.prescale;
  const float valpha = params->scalar_rr2_p6.alpha;
  const float vbeta = params->scalar_rr2_p6.beta;
  const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
  const float vlog2e = params->scalar_rr2_p6.log2e;
  const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
  const float vc6 = params->scalar_rr2_p6.c6;
  const float vc5 = params->scalar_rr2_p6.c5;
  const float vc4 = params->scalar_rr2_p6.c4;
  const float vc3 = params->scalar_rr2_p6.c3;
  const float vc2 = params->scalar_rr2_p6.c2;
  const float vone = params->scalar_rr2_p6.one;

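  // Main loop: process 5 elements per iteration (the "x5" in the kernel name).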
  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    x += 5;

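    // Scale the inputs and clamp to [sat_cutoff, 0]: only the negative branch
    // needs exp(), and below sat_cutoff the result already saturates to -1, so
    // clamping keeps the 2**n reconstruction below from going out of range.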
    const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
    const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
    const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
    const float vz3 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx3 * vprescale, vsat_cutoff), 0.0f);
    const float vz4 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx4 * vprescale, vsat_cutoff), 0.0f);

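    // n := round(z / ln(2)) via the magic-bias trick: adding magic_bias pushes
    // the rounded quotient into the low mantissa bits of vn.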
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;

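    // Build s := 2**n by shifting n's integer bits into the float exponent
    // field, then subtract magic_bias to recover n as a regular float.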
    float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    vn1 -= vmagic_bias;
    float vs2 = uint32_as_float(float_as_uint32(vn2) << 23);
    vn2 -= vmagic_bias;
    float vs3 = uint32_as_float(float_as_uint32(vn3) << 23);
    vn3 -= vmagic_bias;
    float vs4 = uint32_as_float(float_as_uint32(vn4) << 23);
    vn4 -= vmagic_bias;

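    // Cody-Waite reduction t := z - n * ln(2), with ln(2) split into high and
    // low parts (the "rr2" two-step reduction) for extra precision.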
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vt4 = vn4 * vminus_ln2_hi + vz4;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    vt4 = vn4 * vminus_ln2_lo + vt4;

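    // Degree-6 polynomial approximation ("p6"):
    //   e**t - 1 ~ t + c2*t**2 + c3*t**3 + c4*t**4 + c5*t**5 + c6*t**6,
    // evaluated as p := (((c6*t + c5)*t + c4)*t + c3)*t + c2 via Horner's
    // scheme, then multiplied by t at the end of this block.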
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;
    float vp2 = vc6 * vt2 + vc5;
    float vp3 = vc6 * vt3 + vc5;
    float vp4 = vc6 * vt4 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;
    vp2 = vp2 * vt2 + vc4;
    vp3 = vp3 * vt3 + vc4;
    vp4 = vp4 * vt4 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;
    vp4 = vp4 * vt4 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;
    vp4 = vp4 * vt4 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;

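    // Reconstruct the result as e**z - 1 = s * (e**t - 1) + (s - 1):
    // scale t by s and subtract 1 from s up front.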
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;

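    // p := (s*t) * (1 + p) ~ s * (e**t - 1).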
    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;

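    // Branch-free blend of the two ELU cases:
    //   y = max(beta * x, 0) + min(alpha * (e**z - 1), 0),
    // where (vp + vs) = s*(e**t - 1) + (s - 1) = e**z - 1.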
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = __builtin_wasm_max_f32(vx3 * vbeta, 0.0f);
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = __builtin_wasm_max_f32(vx4 * vbeta, 0.0f);

    vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
    vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
    vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
    vy3 += __builtin_wasm_min_f32(ve3, 0.0f);
    vy4 += __builtin_wasm_min_f32(ve4, 0.0f);

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y += 5;
  }
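  // Remainder loop: process the final 1-4 elements one at a time using the
  // same computation as the main loop.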
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);

      float vn = vz * vlog2e + vmagic_bias;
      float vs = uint32_as_float(float_as_uint32(vn) << 23);
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      vt = vn * vminus_ln2_lo + vt;

      float vp = vc6 * vt + vc5;
      vp = vp * vt + vc4;
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
      vy += __builtin_wasm_min_f32(ve, 0.0f);

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}