// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


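// Convert a buffer of IEEE f32 values to IEEE f16 values (stored as uint16_t),
// processing 24 elements per main-loop iteration with WebAssembly SIMD. The
// conversion uses integer and floating-point bit manipulation; NaN inputs are
// replaced with a fixed half-precision NaN encoding (params->wasmsimd.nanh).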
void xnn_f32_f16_vcvt_ukernel__wasmsimd_x24(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

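  // Broadcast the conversion constants from params; each is replicated across the
  // vector with a 64-bit splat load.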
  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
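  // Main loop: convert 24 floats (six vectors of four) per iteration.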
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const v128_t vx0 = wasm_v128_load(input);
    const v128_t vx1 = wasm_v128_load(input + 4);
    const v128_t vx2 = wasm_v128_load(input + 8);
    const v128_t vx3 = wasm_v128_load(input + 12);
    const v128_t vx4 = wasm_v128_load(input + 16);
    const v128_t vx5 = wasm_v128_load(input + 20);
    input += 24;

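    // Split each input into its magnitude (|x|) and its sign bit (x XOR |x|).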
    const v128_t vabsx0 = wasm_f32x4_abs(vx0);
    const v128_t vabsx1 = wasm_f32x4_abs(vx1);
    const v128_t vabsx2 = wasm_f32x4_abs(vx2);
    const v128_t vabsx3 = wasm_f32x4_abs(vx3);
    const v128_t vabsx4 = wasm_f32x4_abs(vx4);
    const v128_t vabsx5 = wasm_f32x4_abs(vx5);

    const v128_t vsignx0 = wasm_v128_xor(vx0, vabsx0);
    const v128_t vsignx1 = wasm_v128_xor(vx1, vabsx1);
    const v128_t vsignx2 = wasm_v128_xor(vx2, vabsx2);
    const v128_t vsignx3 = wasm_v128_xor(vx3, vabsx3);
    const v128_t vsignx4 = wasm_v128_xor(vx4, vabsx4);
    const v128_t vsignx5 = wasm_v128_xor(vx5, vabsx5);

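    // Integer-add the exponent bias to the magnitude bits; the exponent field of the
    // sum seeds the half-precision exponent.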
    v128_t vbias0 = wasm_i32x4_add(vabsx0, vexp_bias);
    v128_t vbias1 = wasm_i32x4_add(vabsx1, vexp_bias);
    v128_t vbias2 = wasm_i32x4_add(vabsx2, vexp_bias);
    v128_t vbias3 = wasm_i32x4_add(vabsx3, vexp_bias);
    v128_t vbias4 = wasm_i32x4_add(vabsx4, vexp_bias);
    v128_t vbias5 = wasm_i32x4_add(vabsx5, vexp_bias);

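    // Scale the magnitude up so that values too large for f16 overflow to +infinity.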
    v128_t vf0 = wasm_f32x4_mul(vabsx0, vscale_to_inf);
    v128_t vf1 = wasm_f32x4_mul(vabsx1, vscale_to_inf);
    v128_t vf2 = wasm_f32x4_mul(vabsx2, vscale_to_inf);
    v128_t vf3 = wasm_f32x4_mul(vabsx3, vscale_to_inf);
    v128_t vf4 = wasm_f32x4_mul(vabsx4, vscale_to_inf);
    v128_t vf5 = wasm_f32x4_mul(vabsx5, vscale_to_inf);

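    // NaN inputs are the only values whose magnitude bits exceed the f32 exponent
    // mask; remember those lanes for the final fixup.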
    const v128_t vnanmaskw0 = wasm_i32x4_gt(vabsx0, vexpw_max);
    const v128_t vnanmaskw1 = wasm_i32x4_gt(vabsx1, vexpw_max);
    const v128_t vnanmaskw2 = wasm_i32x4_gt(vabsx2, vexpw_max);
    const v128_t vnanmaskw3 = wasm_i32x4_gt(vabsx3, vexpw_max);
    const v128_t vnanmaskw4 = wasm_i32x4_gt(vabsx4, vexpw_max);
    const v128_t vnanmaskw5 = wasm_i32x4_gt(vabsx5, vexpw_max);

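    // Keep only the exponent field of the biased values.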
    vbias0 = wasm_v128_and(vbias0, vexpw_max);
    vbias1 = wasm_v128_and(vbias1, vexpw_max);
    vbias2 = wasm_v128_and(vbias2, vexpw_max);
    vbias3 = wasm_v128_and(vbias3, vexpw_max);
    vbias4 = wasm_v128_and(vbias4, vexpw_max);
    vbias5 = wasm_v128_and(vbias5, vexpw_max);

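    // Apply the complementary downscale factor; lanes that overflowed to infinity stay infinite.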
    vf0 = wasm_f32x4_mul(vf0, vscale_to_zero);
    vf1 = wasm_f32x4_mul(vf1, vscale_to_zero);
    vf2 = wasm_f32x4_mul(vf2, vscale_to_zero);
    vf3 = wasm_f32x4_mul(vf3, vscale_to_zero);
    vf4 = wasm_f32x4_mul(vf4, vscale_to_zero);
    vf5 = wasm_f32x4_mul(vf5, vscale_to_zero);

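    // Narrow the 32-bit NaN masks and sign words to 16 bits (signed saturation maps
    // 0x80000000 to 0x8000 and keeps all-ones/all-zeros masks intact).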
    const v128_t vnanmaskh0 = wasm_i16x8_narrow_i32x4(vnanmaskw0, vnanmaskw1);
    const v128_t vnanmaskh1 = wasm_i16x8_narrow_i32x4(vnanmaskw2, vnanmaskw3);
    const v128_t vnanmaskh2 = wasm_i16x8_narrow_i32x4(vnanmaskw4, vnanmaskw5);

    const v128_t vsignh0 = wasm_i16x8_narrow_i32x4(vsignx0, vsignx1);
    const v128_t vsignh1 = wasm_i16x8_narrow_i32x4(vsignx2, vsignx3);
    const v128_t vsignh2 = wasm_i16x8_narrow_i32x4(vsignx4, vsignx5);

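    // Clamp the biased exponent from below at bias_min.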
    vbias0 = wasm_i16x8_max(vbias0, vbias_min);
    vbias1 = wasm_i16x8_max(vbias1, vbias_min);
    vbias2 = wasm_i16x8_max(vbias2, vbias_min);
    vbias3 = wasm_i16x8_max(vbias3, vbias_min);
    vbias4 = wasm_i16x8_max(vbias4, vbias_min);
    vbias5 = wasm_i16x8_max(vbias5, vbias_min);

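    // Add the biased exponent as a float; this shifts the mantissa into position and
    // rounds it to half precision.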
    vf0 = wasm_f32x4_add(vf0, vbias0);
    vf1 = wasm_f32x4_add(vf1, vbias1);
    vf2 = wasm_f32x4_add(vf2, vbias2);
    vf3 = wasm_f32x4_add(vf3, vbias3);
    vf4 = wasm_f32x4_add(vf4, vbias4);
    vf5 = wasm_f32x4_add(vf5, vbias5);

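    // Extract the f16 exponent (from bit 13 upward) and mantissa fields of the result.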
    v128_t vexpw0 = wasm_i32x4_shr(vf0, 13);
    v128_t vexpw1 = wasm_i32x4_shr(vf1, 13);
    v128_t vexpw2 = wasm_i32x4_shr(vf2, 13);
    v128_t vexpw3 = wasm_i32x4_shr(vf3, 13);
    v128_t vexpw4 = wasm_i32x4_shr(vf4, 13);
    v128_t vexpw5 = wasm_i32x4_shr(vf5, 13);

    const v128_t vmantw0 = wasm_v128_and(vf0, vmanth_mask);
    const v128_t vmantw1 = wasm_v128_and(vf1, vmanth_mask);
    const v128_t vmantw2 = wasm_v128_and(vf2, vmanth_mask);
    const v128_t vmantw3 = wasm_v128_and(vf3, vmanth_mask);
    const v128_t vmantw4 = wasm_v128_and(vf4, vmanth_mask);
    const v128_t vmantw5 = wasm_v128_and(vf5, vmanth_mask);

    vexpw0 = wasm_v128_and(vexpw0, vexph_mask);
    vexpw1 = wasm_v128_and(vexpw1, vexph_mask);
    vexpw2 = wasm_v128_and(vexpw2, vexph_mask);
    vexpw3 = wasm_v128_and(vexpw3, vexph_mask);
    vexpw4 = wasm_v128_and(vexpw4, vexph_mask);
    vexpw5 = wasm_v128_and(vexpw5, vexph_mask);

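    // Recombine exponent and mantissa, then narrow each pair of 32-bit vectors into
    // one vector of eight 16-bit results.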
    const v128_t vnonsignw0 = wasm_i32x4_add(vmantw0, vexpw0);
    const v128_t vnonsignw1 = wasm_i32x4_add(vmantw1, vexpw1);
    const v128_t vnonsignw2 = wasm_i32x4_add(vmantw2, vexpw2);
    const v128_t vnonsignw3 = wasm_i32x4_add(vmantw3, vexpw3);
    const v128_t vnonsignw4 = wasm_i32x4_add(vmantw4, vexpw4);
    const v128_t vnonsignw5 = wasm_i32x4_add(vmantw5, vexpw5);

    const v128_t vnonsignh0 = wasm_i16x8_narrow_i32x4(vnonsignw0, vnonsignw1);
    const v128_t vnonsignh1 = wasm_i16x8_narrow_i32x4(vnonsignw2, vnonsignw3);
    const v128_t vnonsignh2 = wasm_i16x8_narrow_i32x4(vnonsignw4, vnonsignw5);

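    // Substitute the half-precision NaN encoding (vnanh) for lanes that were NaN on input.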
    const v128_t vabsh0 = wasm_v128_bitselect(vnanh, vnonsignh0, vnanmaskh0);
    const v128_t vabsh1 = wasm_v128_bitselect(vnanh, vnonsignh1, vnanmaskh1);
    const v128_t vabsh2 = wasm_v128_bitselect(vnanh, vnonsignh2, vnanmaskh2);

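    // Reattach the sign bits and store the 24 half-precision results.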
    const v128_t vh0 = wasm_v128_or(vabsh0, vsignh0);
    const v128_t vh1 = wasm_v128_or(vabsh1, vsignh1);
    const v128_t vh2 = wasm_v128_or(vabsh2, vsignh2);

    wasm_v128_store(o, vh0);
    wasm_v128_store(o + 8, vh1);
    wasm_v128_store(o + 16, vh2);
    o += 24;
  }
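  // Process any remaining full group of eight elements with the same conversion steps.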
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
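  // Convert the final 1-7 elements. The two vector loads may read past the end of the
  // input; the kernel is declared XNN_OOB_READS, so such over-reads are intentional.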
  if XNN_UNPREDICTABLE(n != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

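    // Store the remaining converted values four, two, and one at a time, moving the
    // next lanes of vh into position between stores.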
    if (n & (4 * sizeof(float))) {
      *((double*) o) = wasm_f64x2_extract_lane(vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      *((float*) o) = (float) wasm_f32x4_extract_lane(vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    const uint32_t vh_lo = wasm_i32x4_extract_lane(vh, 0);
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) vh_lo;
    }
  }
}