// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__wasmsimd_x16(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

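  // Conversion constants, each broadcast to all lanes with a 64-bit
  // load-splat (the params struct stores every constant as a repeated
  // pair of 32-bit words).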
  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
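  // Main loop: convert 16 single-precision elements per iteration.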
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    input += 16;

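    // Split each element into its absolute value and its sign
    // (x XOR |x| isolates the sign bit).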
    const v128_t vabsx0 = wasm_f32x4_abs(vx0);
    const v128_t vabsx1 = wasm_f32x4_abs(vx1);
    const v128_t vabsx2 = wasm_f32x4_abs(vx2);
    const v128_t vabsx3 = wasm_f32x4_abs(vx3);

    const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
    const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
    const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
    const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);

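    // Integer-add the exponent bias to the bit pattern of |x|; after
    // masking and clamping below it becomes the magic number that is
    // added back in floating-point to finish the conversion.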
    v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
    v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
    v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
    v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);

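    // Scale |x| up so that values too large for half precision
    // overflow to +infinity already in single precision.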
    v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
    v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
    v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
    v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);

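    // NaN inputs are those whose |x| bits exceed the exponent mask,
    // i.e. the bit pattern of +infinity.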
    const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
    const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
    const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
    const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);

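    // Keep only the exponent field of the bias.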
    vbias0 = wasm_v128_and(vbias0, vexpw_max);
    vbias1 = wasm_v128_and(vbias1, vexpw_max);
    vbias2 = wasm_v128_and(vbias2, vexpw_max);
    vbias3 = wasm_v128_and(vbias3, vexpw_max);

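    // Scale back down: together with scale_to_inf this leaves finite
    // values positioned for rounding into the half-precision format.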
    vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
    vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
    vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
    vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);

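    // Narrow the NaN masks and sign words from 32 to 16 bits; the
    // signed saturation maps a 0x80000000 sign word exactly onto the
    // half-precision sign bit 0x8000.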
    const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
    const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);

    const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
    const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);

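    // Clamp the bias from below so inputs that map to subnormal
    // half-precision values still receive the correct rounding shift.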
    vbias0 = wasm_i16x8_max(vbias0, vbias_min);
    vbias1 = wasm_i16x8_max(vbias1, vbias_min);
    vbias2 = wasm_i16x8_max(vbias2, vbias_min);
    vbias3 = wasm_i16x8_max(vbias3, vbias_min);

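    // Add the bias, reinterpreted as a float: the floating-point adder
    // aligns the mantissa so the bits below half precision are rounded
    // off to nearest-even.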
    vf0 = wasm_f32x4_add(vf0, vbias0);
    vf1 = wasm_f32x4_add(vf1, vbias1);
    vf2 = wasm_f32x4_add(vf2, vbias2);
    vf3 = wasm_f32x4_add(vf3, vbias3);

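    // Shift the rounded bits right by 13 to align the half-precision
    // exponent, mask out the exponent and mantissa fields, and
    // recombine them with an add so a mantissa carry can propagate
    // into the exponent.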
    v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
    v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
    v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
    v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);

    const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
    const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
    const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
    const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);

    vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
    vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
    vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
    vexpw3 = wasm_v128_and(vexpw3, vexph_mask);

    const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
    const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
    const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
    const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);

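    // Narrow the results to 16 bits, substitute the canonical NaN
    // where the NaN mask is set, and restore the signs.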
    const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
    const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);

    const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
    const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);

    const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
    const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);

    wasm_v128_store(o, vh0);
    wasm_v128_store(o + 8, vh1);
    o += 16;
  }
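  // Convert a remaining group of 8 elements with the same steps.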
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
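  // Tail: 1 to 7 elements remain. Both loads may read past the end of
  // the input buffer (declared safe by the XNN_OOB_READS annotation);
  // the second load advances by 4 elements only when at least 4 remain,
  // and otherwise re-reads the first 4.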
  if XNN_UNPREDICTABLE(n != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

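    // Store 4, 2, and finally 1 half-precision element as indicated by
    // the remaining length, shifting consumed lanes out of vh.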
    if (n & (4 * sizeof(float))) {
      *((double*) o) = wasm_f64x2_extract_lane(vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      *((float*) o) = (float) wasm_f32x4_extract_lane(vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    const uint32_t vh_lo = wasm_i32x4_extract_lane(vh, 0);
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) vh_lo;
    }
  }
}