// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__wasmsimd_x32(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

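  // Conversion constants, precomputed in the params structure: an exponent
  // bias adjustment, scale factors that saturate overflow to infinity and
  // flush out-of-range results toward zero, and masks for assembling the
  // binary16 exponent, mantissa, and NaN patterns.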
  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
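  // Main loop: convert 32 floats per iteration, as 8 vectors of 4 lanes.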
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    const v128_t vx4 = wasm_v128_load(input + 16);
    const v128_t vx5 = wasm_v128_load(input + 20);
    const v128_t vx6 = wasm_v128_load(input + 24);
    const v128_t vx7 = wasm_v128_load(input + 28);
    input += 32;

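    // Split each input into magnitude and sign; the sign is reattached after
    // the magnitude has been converted.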
    const v128_t vabsx0 = wasm_f32x4_abs(vx0);
    const v128_t vabsx1 = wasm_f32x4_abs(vx1);
    const v128_t vabsx2 = wasm_f32x4_abs(vx2);
    const v128_t vabsx3 = wasm_f32x4_abs(vx3);
    const v128_t vabsx4 = wasm_f32x4_abs(vx4);
    const v128_t vabsx5 = wasm_f32x4_abs(vx5);
    const v128_t vabsx6 = wasm_f32x4_abs(vx6);
    const v128_t vabsx7 = wasm_f32x4_abs(vx7);

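    // x XOR |x| leaves only the sign bit set in each lane.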
    const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
    const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
    const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
    const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
    const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
    const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);
    const v128_t vsignx6 = wasm_v128_xor(vx6, vabsx6);
    const v128_t vsignx7 = wasm_v128_xor(vx7, vabsx7);

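    // Integer-add exp_bias to the float bits to form a per-lane bias that is
    // later re-added in floating point to align the binary16 result.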
    v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
    v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
    v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
    v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
    v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
    v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);
    v128_t vbias6 = wasm_i32x4_add(vabsx6, vexp_bias);
    v128_t vbias7 = wasm_i32x4_add(vabsx7, vexp_bias);

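    // Scale up so that magnitudes too large for binary16 overflow to infinity.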
    v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
    v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
    v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
    v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
    v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
    v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);
    v128_t vf6 = wasm_f32x4_mul(vabsx6, vscale_to_inf);
    v128_t vf7 = wasm_f32x4_mul(vabsx7, vscale_to_inf);

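    // Lanes whose magnitude bits exceed the infinity pattern (expw_max) held
    // a NaN input; remember them in a mask.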
    const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
    const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
    const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
    const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
    const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
    const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);
    const v128_t vnanmaskw6 = wasm_i32x4_gt(vabsx6, vexpw_max);
    const v128_t vnanmaskw7 = wasm_i32x4_gt(vabsx7, vexpw_max);

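    // Keep only the exponent bits of the bias.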
    vbias0 = wasm_v128_and(vbias0, vexpw_max);
    vbias1 = wasm_v128_and(vbias1, vexpw_max);
    vbias2 = wasm_v128_and(vbias2, vexpw_max);
    vbias3 = wasm_v128_and(vbias3, vexpw_max);
    vbias4 = wasm_v128_and(vbias4, vexpw_max);
    vbias5 = wasm_v128_and(vbias5, vexpw_max);
    vbias6 = wasm_v128_and(vbias6, vexpw_max);
    vbias7 = wasm_v128_and(vbias7, vexpw_max);

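    // Scale back down: results below the binary16 range flush toward zero,
    // and the up/down multiply pair rounds the mantissa to binary16 precision.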
    vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
    vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
    vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
    vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
    vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
    vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);
    vf6 = wasm_f32x4_mul(vf6, vscale_to_zero);
    vf7 = wasm_f32x4_mul(vf7, vscale_to_zero);

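    // Narrow the 32-bit NaN masks to 16-bit lanes; signed saturation keeps
    // all-zeros and all-ones lanes intact.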
    const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
    const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
    const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);
    const v128_t vnanmaskh3 = wasm_i16x8_narrow_i32x4(vnanmaskw6, vnanmaskw7);

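    // Narrow the sign words: signed saturation maps 0x80000000 to 0x8000,
    // placing the sign in the binary16 sign-bit position.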
    const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
    const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
    const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);
    const v128_t vsignh3 = wasm_i16x8_narrow_i32x4(vsignx6, vsignx7);

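    // Clamp the bias from below (16-bit lane max) so subnormal outputs are
    // produced correctly.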
    vbias0 = wasm_i16x8_max(vbias0, vbias_min);
    vbias1 = wasm_i16x8_max(vbias1, vbias_min);
    vbias2 = wasm_i16x8_max(vbias2, vbias_min);
    vbias3 = wasm_i16x8_max(vbias3, vbias_min);
    vbias4 = wasm_i16x8_max(vbias4, vbias_min);
    vbias5 = wasm_i16x8_max(vbias5, vbias_min);
    vbias6 = wasm_i16x8_max(vbias6, vbias_min);
    vbias7 = wasm_i16x8_max(vbias7, vbias_min);

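    // Add the bias as a float: this lines the binary16 exponent and mantissa
    // up at fixed bit positions within the binary32 value.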
    vf0 = wasm_f32x4_add(vf0, vbias0);
    vf1 = wasm_f32x4_add(vf1, vbias1);
    vf2 = wasm_f32x4_add(vf2, vbias2);
    vf3 = wasm_f32x4_add(vf3, vbias3);
    vf4 = wasm_f32x4_add(vf4, vbias4);
    vf5 = wasm_f32x4_add(vf5, vbias5);
    vf6 = wasm_f32x4_add(vf6, vbias6);
    vf7 = wasm_f32x4_add(vf7, vbias7);

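    // Extract the exponent (13 bits up in the binary32 layout) and mantissa
    // fields of the future binary16 value.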
    v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
    v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
    v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
    v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
    v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
    v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);
    v128_t vexpw6 = wasm_i32x4_shr(vf6, 13);
    v128_t vexpw7 = wasm_i32x4_shr(vf7, 13);

    const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
    const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
    const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
    const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
    const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
    const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);
    const v128_t vmantw6 = wasm_v128_and(vf6, vmanth_mask);
    const v128_t vmantw7 = wasm_v128_and(vf7, vmanth_mask);

    vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
    vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
    vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
    vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
    vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
    vexpw5 = wasm_v128_and(vexpw5, vexph_mask);
    vexpw6 = wasm_v128_and(vexpw6, vexph_mask);
    vexpw7 = wasm_v128_and(vexpw7, vexph_mask);

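    // Integer-add mantissa and exponent; a rounding carry out of the mantissa
    // correctly increments the exponent.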
    const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
    const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
    const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
    const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
    const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
    const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);
    const v128_t vnonsignw6 = wasm_i32x4_add(vmantw6, vexpw6);
    const v128_t vnonsignw7 = wasm_i32x4_add(vmantw7, vexpw7);

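    // Pack the 32-bit results into 16-bit halfwords.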
    const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
    const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
    const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);
    const v128_t vnonsignh3 = wasm_i16x8_narrow_i32x4(vnonsignw6, vnonsignw7);

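    // Substitute the canonical binary16 NaN for lanes that were NaN on input.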
    const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
    const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);
    const v128_t vabsh2 = wasm_v128_bitselect(vnanh, vnonsignh2, vnanmaskh2);
    const v128_t vabsh3 = wasm_v128_bitselect(vnanh, vnonsignh3, vnanmaskh3);

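    // Reattach the signs and store 32 converted halfwords.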
    const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
    const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
    const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);
    const v128_t vh3 = wasm_v128_or(vabsh3, vsignh3);

    wasm_v128_store(o, vh0);
    wasm_v128_store(o + 8, vh1);
    wasm_v128_store(o + 16, vh2);
    wasm_v128_store(o + 24, vh3);
    o += 32;
  }
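  // Remainder loop: same algorithm, two vectors (8 floats) at a time.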
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
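  // Tail: 1-7 remaining floats. Full vectors are loaded regardless, which may
  // read past the end of the input buffer; the XNN_OOB_READS annotation on
  // this kernel documents that the extra bytes are read but never used.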
  if XNN_UNPREDICTABLE(n != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

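    // Store 4, 2, then 1 halfwords according to the bits of the remaining
    // element count.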
    if (n & (4 * sizeof(float))) {
      *((double*) o) = wasm_f64x2_extract_lane(vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      *((float*) o) = (float) wasm_f32x4_extract_lane(vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    const uint32_t vh_lo = wasm_i32x4_extract_lane(vh, 0);
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) vh_lo;
    }
  }
}