// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

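// Converts a buffer of 32-bit floats to IEEE half precision (binary16),
// processing 8 elements per loop iteration with WAsm SIMD. The target has no
// native f32->f16 conversion instruction, so each half-precision value is
// assembled from integer and floating-point operations on the f32 bits.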
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x8(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

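  // Load the conversion constants, each splatted across all lanes: the
  // exponent bias added to each input, the overflow (scale_to_inf) and
  // rounding/underflow (scale_to_zero) scales, masks for the half-precision
  // exponent and mantissa fields, and the canonical half-precision NaN.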
  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
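  // Main loop: convert two 4-lane vectors (8 floats) per iteration.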
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

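    // Work on the magnitude; the sign bit is split off here and reattached
    // to the converted result at the end.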
    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
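    // Bias the exponent of each magnitude and rescale towards infinity so
    // that overflowing values saturate. NaN inputs are flagged with a signed
    // integer compare of |x| against the maximum-exponent bit pattern, which
    // only NaN payloads exceed.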
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

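    // Keep only the exponent field of the bias, finish the rounding scale
    // towards zero, and narrow the NaN masks and sign bits to 16-bit lanes.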
    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

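    // Clamp the bias from below so that inputs mapping to subnormal or zero
    // half-precision values are still handled by the addition below.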
    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

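    // Adding the biased exponent (its bits reinterpreted as a float) aligns
    // the rounded half-precision exponent and mantissa within the f32 bits.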
    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

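    // Extract the half-precision fields: the exponent from bit 13 upwards,
    // the mantissa from the low bits of the sum.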
    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

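    // Combine exponent and mantissa, then narrow the 32-bit results of both
    // halves into one vector of eight 16-bit values.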
    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

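    // Replace NaN lanes with the canonical half-precision NaN.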
    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

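    // Reattach the sign bits.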
    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
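  // Tail: convert the remaining 1-7 elements with the same sequence on two
  // (possibly overlapping) vectors. The vector loads may read past the end
  // of the buffer, which the XNN_OOB_READS annotation declares intentional.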
  if XNN_UNPREDICTABLE(n != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

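    // Store 4, 2, and/or 1 converted elements, shifting consumed lanes out
    // of the vector after each partial store.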
    if (n & (4 * sizeof(float))) {
      *((double*) o) = wasm_f64x2_extract_lane(vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      *((float*) o) = (float) wasm_f32x4_extract_lane(vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    const uint32_t vh_lo = wasm_i32x4_extract_lane(vh, 0);
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) vh_lo;
    }
  }
}