// xref: /aosp_15_r20/external/XNNPACK/src/f32-velu/gen/velu-wasm-rr2-lut16-p3-x3.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
1 // Auto-generated file. Do not edit!
2 //   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
3 //   Generator: tools/xngen
4 //
5 // Copyright 2020 Google LLC
6 //
7 // This source code is licensed under the BSD-style license found in the
8 // LICENSE file in the root directory of this source tree.
9 
10 #include <assert.h>
11 #include <math.h>
12 
13 #include <xnnpack/common.h>
14 #include <xnnpack/math.h>
15 #include <xnnpack/vunary.h>
16 
17 
18 extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];
19 
xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3(size_t n,const float * x,float * y,const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS (1)])20 void xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x3(
21     size_t n,
22     const float* x,
23     float* y,
24     const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
25 {
26   assert(n % sizeof(float) == 0);
27 
28   const float vprescale = params->scalar_rr2_lut16_p3.prescale;
29   const float valpha = params->scalar_rr2_lut16_p3.alpha;
30   const float vbeta = params->scalar_rr2_lut16_p3.beta;
31   const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
32   const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
33   const uint32_t vindex_mask = UINT32_C(0xF);
34   const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
35   const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
36   const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
37   const float vc3 = params->scalar_rr2_lut16_p3.c3;
38   const float vc2 = params->scalar_rr2_lut16_p3.c2;
39   const float vone = params->scalar_rr2_lut16_p3.one;
40 
41   for (; n >= 3 * sizeof(float); n -= 3 * sizeof(float)) {
42     float vx0 = x[0];
43     float vx1 = x[1];
44     float vx2 = x[2];
45     x += 3;
46 
47     const float vz0 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx0 * vprescale, vsat_cutoff), 0.0f);
48     const float vz1 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx1 * vprescale, vsat_cutoff), 0.0f);
49     const float vz2 = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx2 * vprescale, vsat_cutoff), 0.0f);
50 
51     float vn0 = vz0 * vlog2e + vmagic_bias;
52     float vn1 = vz1 * vlog2e + vmagic_bias;
53     float vn2 = vz2 * vlog2e + vmagic_bias;
54 
55     const uint32_t ven0 = float_as_uint32(vn0) << 19;
56     const uint32_t vidx0 = float_as_uint32(vn0) & vindex_mask;
57     vn0 -= vmagic_bias;
58     const uint32_t ven1 = float_as_uint32(vn1) << 19;
59     const uint32_t vidx1 = float_as_uint32(vn1) & vindex_mask;
60     vn1 -= vmagic_bias;
61     const uint32_t ven2 = float_as_uint32(vn2) << 19;
62     const uint32_t vidx2 = float_as_uint32(vn2) & vindex_mask;
63     vn2 -= vmagic_bias;
64 
65     float vt0 = vn0 * vminus_ln2_hi + vz0;
66     float vs0 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
67     float vt1 = vn1 * vminus_ln2_hi + vz1;
68     float vs1 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
69     float vt2 = vn2 * vminus_ln2_hi + vz2;
70     float vs2 = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
71 
72     vt0 = vn0 * vminus_ln2_lo + vt0;
73     vt1 = vn1 * vminus_ln2_lo + vt1;
74     vt2 = vn2 * vminus_ln2_lo + vt2;
75 
76     float vp0 = vc3 * vt0 + vc2;
77     float vp1 = vc3 * vt1 + vc2;
78     float vp2 = vc3 * vt2 + vc2;
79 
80     vp0 *= vt0;
81     vp1 *= vt1;
82     vp2 *= vt2;
83 
84     vt0 *= vs0;
85     vs0 -= vone;
86     vt1 *= vs1;
87     vs1 -= vone;
88     vt2 *= vs2;
89     vs2 -= vone;
90 
91     vp0 = vp0 * vt0 + vt0;
92     vp1 = vp1 * vt1 + vt1;
93     vp2 = vp2 * vt2 + vt2;
94 
95     const float ve0 = (vp0 + vs0) * valpha;
96     float vy0 = __builtin_wasm_max_f32(vx0 * vbeta, 0.0f);
97     const float ve1 = (vp1 + vs1) * valpha;
98     float vy1 = __builtin_wasm_max_f32(vx1 * vbeta, 0.0f);
99     const float ve2 = (vp2 + vs2) * valpha;
100     float vy2 = __builtin_wasm_max_f32(vx2 * vbeta, 0.0f);
101 
102     vy0 += __builtin_wasm_min_f32(ve0, 0.0f);
103     vy1 += __builtin_wasm_min_f32(ve1, 0.0f);
104     vy2 += __builtin_wasm_min_f32(ve2, 0.0f);
105 
106     y[0] = vy0;
107     y[1] = vy1;
108     y[2] = vy2;
109     y += 3;
110   }
111   if XNN_UNLIKELY(n != 0) {
112     do {
113       float vx = *x++;
114 
115       const float vz = __builtin_wasm_min_f32(__builtin_wasm_max_f32(vx * vprescale, vsat_cutoff), 0.0f);
116 
117       float vn = vz * vlog2e + vmagic_bias;
118       const uint32_t ven = float_as_uint32(vn) << 19;
119       const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
120       vn -= vmagic_bias;
121 
122       float vt = vn * vminus_ln2_hi + vz;
123       float vs = uint32_as_float(xnn_table_exp2minus_k_over_16[vidx] + ven);
124 
125       vt = vn * vminus_ln2_lo + vt;
126 
127       float vp = vc3 * vt + vc2;
128       vp *= vt;
129 
130       vt *= vs;
131       vs -= vone;
132       vp = vp * vt + vt;
133       const float ve = (vp + vs) * valpha;
134 
135       float vy = __builtin_wasm_max_f32(vx * vbeta, 0.0f);
136       vy += __builtin_wasm_min_f32(ve, 0.0f);
137 
138       *y++ = vy;
139 
140       n -= sizeof(float);
141     } while (n != 0);
142   }
143 }
144