// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

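// Computes the sigmoid f(x) = 1 / (1 + exp(-x)) element-wise over a buffer of
// floats. The kernel works on z = |x|: it reduces exp(-z) = 2**n * exp(-t)
// using a two-constant (hi/lo) split of ln(2) ("rr2"), approximates exp(-t)
// with a degree-5 polynomial ("p5"), forms the quotient with an explicit
// division ("div"), and processes 24 elements per main-loop iteration ("x24").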
void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x24(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

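  // Main loop: six vectors of 4 floats (24 elements) per iteration.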
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const v128_t vx0123 = wasm_v128_load(x);
    const v128_t vx4567 = wasm_v128_load(x + 4);
    const v128_t vx89AB = wasm_v128_load(x + 8);
    const v128_t vxCDEF = wasm_v128_load(x + 12);
    const v128_t vxGHIJ = wasm_v128_load(x + 16);
    const v128_t vxKLMN = wasm_v128_load(x + 20);
    x += 24;

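    // Work with z = |x| so the exponential argument -z is non-positive and
    // exp(-z) stays in (0, 1]; the sign of x is reapplied at the end.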
    const v128_t vz0123 = wasm_f32x4_abs(vx0123);
    const v128_t vz4567 = wasm_f32x4_abs(vx4567);
    const v128_t vz89AB = wasm_f32x4_abs(vx89AB);
    const v128_t vzCDEF = wasm_f32x4_abs(vxCDEF);
    const v128_t vzGHIJ = wasm_f32x4_abs(vxGHIJ);
    const v128_t vzKLMN = wasm_f32x4_abs(vxKLMN);

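    // n := round(-z * log2(e)) via the magic-bias trick: after the addition,
    // the low mantissa bits of vn hold the integer n plus a fixed offset.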
    v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vminus_log2e));
    v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vminus_log2e));
    v128_t vn89AB = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz89AB, vminus_log2e));
    v128_t vnCDEF = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzCDEF, vminus_log2e));
    v128_t vnGHIJ = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzGHIJ, vminus_log2e));
    v128_t vnKLMN = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vzKLMN, vminus_log2e));

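    // s := 2**n, built by shifting the pre-biased integer into the exponent
    // field of an IEEE-754 single-precision number.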
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);
    const v128_t vs89AB = wasm_i32x4_shl(vn89AB, 23);
    const v128_t vsCDEF = wasm_i32x4_shl(vnCDEF, 23);
    const v128_t vsGHIJ = wasm_i32x4_shl(vnGHIJ, 23);
    const v128_t vsKLMN = wasm_i32x4_shl(vnKLMN, 23);

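    // Subtract the magic bias to recover n as a float.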
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);
    vn89AB = wasm_f32x4_sub(vn89AB, vmagic_bias);
    vnCDEF = wasm_f32x4_sub(vnCDEF, vmagic_bias);
    vnGHIJ = wasm_f32x4_sub(vnGHIJ, vmagic_bias);
    vnKLMN = wasm_f32x4_sub(vnKLMN, vmagic_bias);

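    // Reduced argument t := z + n * ln(2), accumulated against a two-word
    // (hi/lo) split of ln(2) for extra precision, so that
    // exp(-z) = 2**n * exp(-t).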
    v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vln2_hi));
    v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vln2_hi));
    v128_t vt89AB = wasm_f32x4_add(vz89AB, wasm_f32x4_mul(vn89AB, vln2_hi));
    v128_t vtCDEF = wasm_f32x4_add(vzCDEF, wasm_f32x4_mul(vnCDEF, vln2_hi));
    v128_t vtGHIJ = wasm_f32x4_add(vzGHIJ, wasm_f32x4_mul(vnGHIJ, vln2_hi));
    v128_t vtKLMN = wasm_f32x4_add(vzKLMN, wasm_f32x4_mul(vnKLMN, vln2_hi));

    vt0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vn0123, vln2_lo));
    vt4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vn4567, vln2_lo));
    vt89AB = wasm_f32x4_add(vt89AB, wasm_f32x4_mul(vn89AB, vln2_lo));
    vtCDEF = wasm_f32x4_add(vtCDEF, wasm_f32x4_mul(vnCDEF, vln2_lo));
    vtGHIJ = wasm_f32x4_add(vtGHIJ, wasm_f32x4_mul(vnGHIJ, vln2_lo));
    vtKLMN = wasm_f32x4_add(vtKLMN, wasm_f32x4_mul(vnKLMN, vln2_lo));

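    // Degree-5 polynomial p(t) ~= (exp(-t) - 1) / t, evaluated by Horner's
    // scheme.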
    v128_t vp0123 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt0123, vc5));
    v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt4567, vc5));
    v128_t vp89AB = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt89AB, vc5));
    v128_t vpCDEF = wasm_f32x4_add(vc4, wasm_f32x4_mul(vtCDEF, vc5));
    v128_t vpGHIJ = wasm_f32x4_add(vc4, wasm_f32x4_mul(vtGHIJ, vc5));
    v128_t vpKLMN = wasm_f32x4_add(vc4, wasm_f32x4_mul(vtKLMN, vc5));

    vp0123 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt89AB, vp89AB));
    vpCDEF = wasm_f32x4_add(vc3, wasm_f32x4_mul(vtCDEF, vpCDEF));
    vpGHIJ = wasm_f32x4_add(vc3, wasm_f32x4_mul(vtGHIJ, vpGHIJ));
    vpKLMN = wasm_f32x4_add(vc3, wasm_f32x4_mul(vtKLMN, vpKLMN));

    vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt89AB, vp89AB));
    vpCDEF = wasm_f32x4_add(vc2, wasm_f32x4_mul(vtCDEF, vpCDEF));
    vpGHIJ = wasm_f32x4_add(vc2, wasm_f32x4_mul(vtGHIJ, vpGHIJ));
    vpKLMN = wasm_f32x4_add(vc2, wasm_f32x4_mul(vtKLMN, vpKLMN));

    vp0123 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt4567, vp4567));
    vp89AB = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt89AB, vp89AB));
    vpCDEF = wasm_f32x4_add(vc1, wasm_f32x4_mul(vtCDEF, vpCDEF));
    vpGHIJ = wasm_f32x4_add(vc1, wasm_f32x4_mul(vtGHIJ, vpGHIJ));
    vpKLMN = wasm_f32x4_add(vc1, wasm_f32x4_mul(vtKLMN, vpKLMN));

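    // Reconstruct e := s + (s * t) * p ~= 2**n * exp(-t) = exp(-z).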
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);
    vt89AB = wasm_f32x4_mul(vt89AB, vs89AB);
    vtCDEF = wasm_f32x4_mul(vtCDEF, vsCDEF);
    vtGHIJ = wasm_f32x4_mul(vtGHIJ, vsGHIJ);
    vtKLMN = wasm_f32x4_mul(vtKLMN, vsKLMN);

    const v128_t ve0123 = wasm_f32x4_add(vs0123, wasm_f32x4_mul(vt0123, vp0123));
    const v128_t ve4567 = wasm_f32x4_add(vs4567, wasm_f32x4_mul(vt4567, vp4567));
    const v128_t ve89AB = wasm_f32x4_add(vs89AB, wasm_f32x4_mul(vt89AB, vp89AB));
    const v128_t veCDEF = wasm_f32x4_add(vsCDEF, wasm_f32x4_mul(vtCDEF, vpCDEF));
    const v128_t veGHIJ = wasm_f32x4_add(vsGHIJ, wasm_f32x4_mul(vtGHIJ, vpGHIJ));
    const v128_t veKLMN = wasm_f32x4_add(vsKLMN, wasm_f32x4_mul(vtKLMN, vpKLMN));

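    // Denominator d := 1 + exp(-z).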
    const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
    const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);
    const v128_t vd89AB = wasm_f32x4_add(ve89AB, vone);
    const v128_t vdCDEF = wasm_f32x4_add(veCDEF, vone);
    const v128_t vdGHIJ = wasm_f32x4_add(veGHIJ, vone);
    const v128_t vdKLMN = wasm_f32x4_add(veKLMN, vone);

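    // f := exp(-z) / (1 + exp(-z)) = sigmoid(-z).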
    v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
    v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);
    v128_t vf89AB = wasm_f32x4_div(ve89AB, vd89AB);
    v128_t vfCDEF = wasm_f32x4_div(veCDEF, vdCDEF);
    v128_t vfGHIJ = wasm_f32x4_div(veGHIJ, vdGHIJ);
    v128_t vfKLMN = wasm_f32x4_div(veKLMN, vdKLMN);

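    // Flush f to zero where z is so large that exp(-z) underflows: andnot
    // clears the lanes where z > denorm_cutoff.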
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));
    vf89AB = wasm_v128_andnot(vf89AB, wasm_f32x4_gt(vz89AB, vdenorm_cutoff));
    vfCDEF = wasm_v128_andnot(vfCDEF, wasm_f32x4_gt(vzCDEF, vdenorm_cutoff));
    vfGHIJ = wasm_v128_andnot(vfGHIJ, wasm_f32x4_gt(vzGHIJ, vdenorm_cutoff));
    vfKLMN = wasm_v128_andnot(vfKLMN, wasm_f32x4_gt(vzKLMN, vdenorm_cutoff));

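    // sigmoid(x) = sigmoid(-z) for x < 0 and 1 - sigmoid(-z) for x >= 0:
    // select per lane on the replicated sign bit of x.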
    vf0123 = wasm_v128_bitselect(vf0123, wasm_f32x4_sub(vone, vf0123), wasm_i32x4_shr(vx0123, 31));
    vf4567 = wasm_v128_bitselect(vf4567, wasm_f32x4_sub(vone, vf4567), wasm_i32x4_shr(vx4567, 31));
    vf89AB = wasm_v128_bitselect(vf89AB, wasm_f32x4_sub(vone, vf89AB), wasm_i32x4_shr(vx89AB, 31));
    vfCDEF = wasm_v128_bitselect(vfCDEF, wasm_f32x4_sub(vone, vfCDEF), wasm_i32x4_shr(vxCDEF, 31));
    vfGHIJ = wasm_v128_bitselect(vfGHIJ, wasm_f32x4_sub(vone, vfGHIJ), wasm_i32x4_shr(vxGHIJ, 31));
    vfKLMN = wasm_v128_bitselect(vfKLMN, wasm_f32x4_sub(vone, vfKLMN), wasm_i32x4_shr(vxKLMN, 31));

    wasm_v128_store(y, vf0123);
    wasm_v128_store(y + 4, vf4567);
    wasm_v128_store(y + 8, vf89AB);
    wasm_v128_store(y + 12, vfCDEF);
    wasm_v128_store(y + 16, vfGHIJ);
    wasm_v128_store(y + 20, vfKLMN);
    y += 24;
  }
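  // Tail loop: same computation, one 4-float vector at a time.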
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
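  // Remainder of 1-3 elements: load a full vector (reading up to 3 floats
  // past the end, which the XNN_OOB_READS annotation permits) and store only
  // the valid lanes.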
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}
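
// Usage sketch (illustrative only, not part of the generated kernel). The
// parameter-initializer name below follows XNNPACK's naming convention for
// this parameterization and is an assumption here; verify it against the
// headers of the XNNPACK revision in use. Note that the first argument is a
// byte count, not an element count, and need not be a multiple of 24 floats.
//
//   union xnn_f32_sigmoid_params params;
//   xnn_init_f32_sigmoid_wasmsimd_rr2_p5_params(&params);
//   xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x24(
//       count * sizeof(float), input, output, &params);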