// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <wasm_simd128.h>

#include <xnnpack/math-stubs.h>


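// Converts a buffer of binary32 (float) values to binary16 (half-precision)
// values using only WebAssembly SIMD operations, i.e. without a native
// conversion instruction. Outline of the bit manipulation used below:
//   1. The sign bit is split off and re-attached at the end.
//   2. |x| is scaled by 2**112 and then by 2**-110, so values too large for
//      binary16 saturate to binary32 infinity while everything else is
//      scaled by a net factor of 4.
//   3. A re-biased exponent, reinterpreted as a float and added to the scaled
//      value, lets the FP adder perform round-to-nearest-even into the
//      binary16 mantissa position.
//   4. The binary16 exponent and mantissa are extracted with shifts and
//      masks, and NaN inputs are replaced with the canonical binary16 NaN.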
void xnn_math_f32_f16_cvt__wasmsimd(
    size_t n,
    const float* input,
    void* output)
{
  // n is given in bytes of binary16 output: each iteration consumes 8 binary32
  // elements and produces 8 uint16_t (16 bytes) of output.
  assert(n % (8 * sizeof(uint16_t)) == 0);

  // 2**112: multiplying |x| by this pushes values that overflow binary16 to
  // binary32 infinity.
  const v128_t vscale_to_inf = wasm_f32x4_const_splat(0x1.0p+112f);
  // 2**-110: the second multiplication brings the value back down, for a net
  // scaling of 2**2.
  const v128_t vscale_to_zero = wasm_f32x4_const_splat(0x1.0p-110f);
  // 15 << 23: adjustment added to the binary32 exponent field when forming
  // the rounding bias.
  const v128_t vexp_bias = wasm_i32x4_const_splat(0x07800000);
  // Binary32 exponent mask (also the bit pattern of +infinity).
  const v128_t vexpw_max = wasm_i32x4_const_splat(0x7F800000);
  // Lower clamp for the rounding bias, so subnormal and zero results are
  // handled correctly.
  const v128_t vbias_min = wasm_i32x4_const_splat(0x40008000);
  // Binary16 exponent mask.
  const v128_t vexph_mask = wasm_i32x4_const_splat(0x7C00);
  // Mask for the bits that contribute the binary16 mantissa.
  const v128_t vmanth_mask = wasm_i32x4_const_splat(0x0FFF);
  // Canonical binary16 NaN.
  const v128_t vnanh = wasm_i16x8_const_splat(0x7E00);

  uint16_t* o = (uint16_t*) output;
  // Process one tile of 8 binary32 elements (two SIMD registers) per iteration.
  for (; n != 0; n -= 8 * sizeof(uint16_t)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    // Isolate the sign bit: x XOR |x| keeps only bit 31.
    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    // Start building the rounding bias from the bit pattern of |x|.
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    // Scale up: values too large for binary16 become binary32 infinity.
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    // NaN inputs are the only values whose bits compare greater than the
    // infinity bit pattern (as signed 32-bit integers).
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    // Keep only the exponent field of the bias.
    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    // Scale back down for a net scaling of 2**2; overflowed values stay infinite.
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    // Narrow the NaN masks and sign words to 16-bit lanes.
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    // Clamp the bias from below so subnormal and zero results round correctly
    // (a 16-bit lanewise maximum is sufficient for these bit patterns).
    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    // Adding the bias (reinterpreted as a float) makes the FP adder round the
    // value into the binary16 mantissa position.
    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    // Extract the binary16 exponent and mantissa from the rounded bits.
    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    // Combine into the binary16 value without its sign bit.
    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    // Replace NaN inputs with the canonical binary16 NaN, then re-attach the sign.
    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
}
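
// Minimal usage sketch (not part of the original XNNPACK sources): shows one
// plausible way to call xnn_math_f32_f16_cvt__wasmsimd on a single 8-element
// tile. The input values are illustrative, and the block is disabled with
// #if 0 so it does not affect the build. It assumes a WebAssembly SIMD
// toolchain and the declaration pulled in via <xnnpack/math-stubs.h> above.
#if 0
#include <stdio.h>

int main(void) {
  // One tile: n is given in bytes of binary16 output (8 elements * 2 bytes).
  const float input[8] = {0.0f, -0.0f, 1.0f, -2.5f, 65504.0f, 1.0e+30f, 1.0e-8f, 3.14159f};
  uint16_t output[8];
  xnn_math_f32_f16_cvt__wasmsimd(8 * sizeof(uint16_t), input, output);
  for (size_t i = 0; i < 8; i++) {
    printf("%.9g -> 0x%04X\n", input[i], (unsigned) output[i]);
  }
  return 0;
}
#endif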