// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

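  // Load the constants of the approximation from the parameters structure (each one is
  // broadcast across the vector by a 64-bit load-and-splat).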
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    const v128_t vx0123 = wasm_v128_load(x);
    const v128_t vx4567 = wasm_v128_load(x + 4);
    const v128_t vx89AB = wasm_v128_load(x + 8);
    x += 12;

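    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace the result with 1 - f[-z] if x >= 0.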
    const v128_t vz0123 = wasm_f32x4_abs(vx0123);
    const v128_t vz4567 = wasm_f32x4_abs(vx4567);
    const v128_t vz89AB = wasm_f32x4_abs(vx89AB);

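    // Compute reduced argument n := round(-z / log(2)). The rounding is done by adding a large
    // number (magic bias) to the product z * (-log2(e)), which leaves the rounded integer in the
    // low bits of the float; the magic bias is subtracted back out below. The trick is valid only
    // for moderately sized z, but inputs that large saturate sigmoid(x) anyway and are fixed up at
    // the end of the algorithm.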
    v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vminus_log2e));
    v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vminus_log2e));
    v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vminus_log2e));

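    // Build a floating-point scale s := 2**n by shifting the integer bits of the biased n into
    // the exponent field.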
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);

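    // Subtract the magic bias to recover the final n := round(-z / log(2)) as a floating-point value.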
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);

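    // Compute reduced argument t := z + n * log(2), i.e. -t = -z - n * log(2). Cody-Waite range
    // reduction is used: log(2) is split into a "high" and a "low" part to improve accuracy.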
    v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vln2_hi));
    v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vln2_hi));
    v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vln2_hi));

    vt0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vn0123, vln2_lo));
    vt4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vn4567, vln2_lo));
    vt89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vn89AB, vln2_lo));

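    // Evaluate the degree-5 polynomial p(t) = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))
    // by Horner's scheme, so that exp(-t) ~ 1 + t * p(t) on the reduction interval.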
    v128_t vp0123 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt0123, vc5));
    v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt4567, vc5));
    v128_t vp89AB = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt89AB, vc5));

    vp0123 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt89AB, vp89AB));

    vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt89AB, vp89AB));

    vp0123 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt89AB, vp89AB));

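    // Reconstruct e := exp(-z) = s * (1 + t * p(t)) = s + (t * s) * p(t).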
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);

    const v128_t ve0123 = wasm_f32x4_add(vs0123, wasm_f32x4_mul(vt0123, vp0123));
    const v128_t ve4567 = wasm_f32x4_add(vs4567, wasm_f32x4_mul(vt4567, vp4567));
    const v128_t ve89AB = wasm_f32x4_add(vs89AB, wasm_f32x4_mul(vt89AB, vp89AB));

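    // Denominator of the sigmoid fraction: d := 1 + exp(-z).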
    const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
    const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
    const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);

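    // Compute f[-z] := exp(-z) / (1 + exp(-z)) with a full division.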
    v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
    v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
    v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);

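    // For z above the denormal cutoff, exp(-z) underflows and the quotient is no longer
    // meaningful; flush those lanes to +0.0f.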
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));

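    // Reconstruct sigmoid(x): keep f[-z] where x is negative (sign bit set), use 1 - f[-z]
    // where x is non-negative.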
    vf0123 = wasm_v128_bitselect(vf0123, wasm_f32x4_sub(vone, vf0123), wasm_i32x4_shr(vx0123, 31));
    vf4567 = wasm_v128_bitselect(vf4567, wasm_f32x4_sub(vone, vf4567), wasm_i32x4_shr(vx4567, 31));
    vf89AB = wasm_v128_bitselect(vf89AB, wasm_f32x4_sub(vone, vf89AB), wasm_i32x4_shr(vx89AB, 31));

    wasm_v128_store(y, vf0123);
    wasm_v128_store(y + 4, vf4567);
    wasm_v128_store(y + 8, vf89AB);
    y += 12;
  }
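  // Process any remaining full groups of 4 elements with the same computation as the main loop.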
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
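  // Handle the final 1-3 elements, if any. The load reads a full vector; the kernel is marked
  // XNN_OOB_READS, so reading past the end of the input buffer is permitted here.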
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

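    // Store only the valid lanes: two lanes at a time via a 64-bit store, then a single lane.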
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}