// Auto-generated file. Do not edit!
// Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2(size_t n,const float * x,float * y,const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS (1)])20 void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2(
21 size_t n,
22 const float* x,
23 float* y,
24 const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
25 {
26 assert(n % sizeof(float) == 0);
27
28 const float vprescale = params->scalar_rr2_lut16_p3.prescale;
29 const float valpha = params->scalar_rr2_lut16_p3.alpha;
30 const float vbeta = params->scalar_rr2_lut16_p3.beta;
31 const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
32 const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
33 const uint32_t vindex_mask = UINT32_C(0xF);
34 const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
35 const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
36 const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
37 const float vc3 = params->scalar_rr2_lut16_p3.c3;
38 const float vc2 = params->scalar_rr2_lut16_p3.c2;
39 const float vone = params->scalar_rr2_lut16_p3.one;
40
41 for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
42 float vx0 = x[0];
43 float vx1 = x[1];
44 x += 2;
45
46 const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
47 const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
48
49 float vn0 = vz0 * vlog2e + vmagic_bias;
50 float vn1 = vz1 * vlog2e + vmagic_bias;
51
52 const uint32_t ven0 = float_as_uint32(vn0) << 19;
53 const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
54 vn0 -= vmagic_bias;
55 const uint32_t ven1 = float_as_uint32(vn1) << 19;
56 const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
57 vn1 -= vmagic_bias;
58
59 float vt0 = vn0 * vminus_ln2_hi + vz0;
60 float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
61 float vt1 = vn1 * vminus_ln2_hi + vz1;
62 float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
63
64 vt0 = vn0 * vminus_ln2_lo + vt0;
65 vt1 = vn1 * vminus_ln2_lo + vt1;
66
67 float vp0 = vc3 * vt0 + vc2;
68 float vp1 = vc3 * vt1 + vc2;
69
70 vp0 *= vt0;
71 vp1 *= vt1;
72
73 vt0 *= vs0;
74 vs0 -= vone;
75 vt1 *= vs1;
76 vs1 -= vone;
77
78 vp0 = vp0 * vt0 + vt0;
79 vp1 = vp1 * vt1 + vt1;
80
81 const float ve0 = (vp0 + vs0) * valpha;
82 float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
83 const float ve1 = (vp1 + vs1) * valpha;
84 float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
85
86 vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
87 vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
88
89 y[0] = vy0;
90 y[1] = vy1;
91 y += 2;
92 }
93 if XNN_UNLIKELY(n != 0) {
94 float vx = *x;
95
96 const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
97
98 float vn = vz * vlog2e + vmagic_bias;
99 const uint32_t ven = float_as_uint32(vn) << 19;
100 const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
101 vn -= vmagic_bias;
102
103 float vt = vn * vminus_ln2_hi + vz;
104 float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
105
106 vt = vn * vminus_ln2_lo + vt;
107
108 float vp = vc3 * vt + vc2;
109 vp *= vt;
110
111 vt *= vs;
112 vs -= vone;
113 vp = vp * vt + vt;
114 const float ve = (vp + vs) * valpha;
115
116 float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
117 vy += __builtin_wasm_min_f32(ve, 0.0f);
118
119 *y = vy;
120 }
121 }