// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/wasmsimd-int32.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

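// Converts a buffer of IEEE binary16 (half-precision) values to binary32
// (single-precision) using integer WAsm SIMD bit manipulation, without any
// native f16 hardware support. The "x24" suffix is the unroll factor: the
// main loop converts 24 elements (three 128-bit vectors) per iteration.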
void xnn_f16_f32_vcvt_ukernel__wasmsimd_int32_x24(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

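  // Splat the conversion constants across all SIMD lanes. Their roles (the
  // values are supplied by the corresponding xnn_f16_f32_cvt_params
  // initializer): sign_mask isolates the sign bit; exp_offset and exp_scale
  // rebias the exponent from binary16 (bias 15) to binary32 (bias 127) on
  // the normalized path; magic_bias drives the denormal fixup; and
  // denorm_cutoff separates normalized lanes from zero/denormal ones.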
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int32.sign_mask);
  const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int32.exp_offset);
  const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int32.exp_scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int32.magic_bias);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
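  // Main loop: convert three vectors (24 half-precision elements) per
  // iteration.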
  for (; n >= 24 * sizeof(uint16_t); n -= 24 * sizeof(uint16_t)) {
    const v128_t vh0 = wasm_v128_load(i);
    const v128_t vh1 = wasm_v128_load(i + 8);
    const v128_t vh2 = wasm_v128_load(i + 16);
    i += 24;

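    // Interleave zeros below each group of halves so that every 16-bit half
    // lands in the upper half of a 32-bit lane, which is equivalent to
    // zero-extension followed by a left shift of 16.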
    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw0 = wasm_v16x8_shuffle(vzero, vh0, 0,  8, 1,  9, 2, 10, 3, 11);
    const v128_t vw1 = wasm_v16x8_shuffle(vzero, vh0, 4, 12, 5, 13, 6, 14, 7, 15);
    const v128_t vw2 = wasm_v16x8_shuffle(vzero, vh1, 0,  8, 1,  9, 2, 10, 3, 11);
    const v128_t vw3 = wasm_v16x8_shuffle(vzero, vh1, 4, 12, 5, 13, 6, 14, 7, 15);
    const v128_t vw4 = wasm_v16x8_shuffle(vzero, vh2, 0,  8, 1,  9, 2, 10, 3, 11);
    const v128_t vw5 = wasm_v16x8_shuffle(vzero, vh2, 4, 12, 5, 13, 6, 14, 7, 15);

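    // Split each word into its sign bit and the remaining exponent+mantissa
    // bits: XOR-ing out the extracted sign clears bit 31.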
    const v128_t vsign0 = wasm_v128_and(vw0, vsign_mask);
    const v128_t vsign1 = wasm_v128_and(vw1, vsign_mask);
    const v128_t vsign2 = wasm_v128_and(vw2, vsign_mask);
    const v128_t vsign3 = wasm_v128_and(vw3, vsign_mask);
    const v128_t vsign4 = wasm_v128_and(vw4, vsign_mask);
    const v128_t vsign5 = wasm_v128_and(vw5, vsign_mask);

    const v128_t vnonsign0 = wasm_v128_xor(vw0, vsign0);
    const v128_t vnonsign1 = wasm_v128_xor(vw1, vsign1);
    const v128_t vnonsign2 = wasm_v128_xor(vw2, vsign2);
    const v128_t vnonsign3 = wasm_v128_xor(vw3, vsign3);
    const v128_t vnonsign4 = wasm_v128_xor(vw4, vsign4);
    const v128_t vnonsign5 = wasm_v128_xor(vw5, vsign5);

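    // Normalized path: shift right by 3 to align the half-precision
    // exponent and mantissa with their binary32 field positions, add
    // exp_offset to the exponent field, then multiply by exp_scale to
    // finish the rebiasing. Infinities and NaNs also pass through this
    // path correctly.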
    const v128_t vnorm0 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign0, 3), vexp_offset), vexp_scale);
    const v128_t vnorm1 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign1, 3), vexp_offset), vexp_scale);
    const v128_t vnorm2 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign2, 3), vexp_offset), vexp_scale);
    const v128_t vnorm3 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign3, 3), vexp_offset), vexp_scale);
    const v128_t vnorm4 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign4, 3), vexp_offset), vexp_scale);
    const v128_t vnorm5 = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign5, 3), vexp_offset), vexp_scale);

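    // Denormal path: move the half bits back down to the low 16 bits, OR in
    // the magic bias, and subtract the bias as a float. With the constants
    // from the param initializer this reconstructs the exact value of zero
    // and denormal inputs.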
    const v128_t vdenorm0 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign0, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm1 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign1, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm2 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign2, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm3 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign3, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm4 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign4, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm5 = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign5, 16), vmagic_bias), vmagic_bias);

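    // Per-lane selection mask: lanes whose non-sign bits exceed the cutoff
    // hold normalized values (or Inf/NaN) and take the vnorm result; the
    // rest take vdenorm.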
    const v128_t vxmask0 = wasm_i32x4_gt(vnonsign0, vdenorm_cutoff);
    const v128_t vxmask1 = wasm_i32x4_gt(vnonsign1, vdenorm_cutoff);
    const v128_t vxmask2 = wasm_i32x4_gt(vnonsign2, vdenorm_cutoff);
    const v128_t vxmask3 = wasm_i32x4_gt(vnonsign3, vdenorm_cutoff);
    const v128_t vxmask4 = wasm_i32x4_gt(vnonsign4, vdenorm_cutoff);
    const v128_t vxmask5 = wasm_i32x4_gt(vnonsign5, vdenorm_cutoff);

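    // Blend the two paths per lane and reattach the sign bit.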
    const v128_t vf0 = wasm_v128_or(vsign0, wasm_v128_bitselect(vnorm0, vdenorm0, vxmask0));
    const v128_t vf1 = wasm_v128_or(vsign1, wasm_v128_bitselect(vnorm1, vdenorm1, vxmask1));
    const v128_t vf2 = wasm_v128_or(vsign2, wasm_v128_bitselect(vnorm2, vdenorm2, vxmask2));
    const v128_t vf3 = wasm_v128_or(vsign3, wasm_v128_bitselect(vnorm3, vdenorm3, vxmask3));
    const v128_t vf4 = wasm_v128_or(vsign4, wasm_v128_bitselect(vnorm4, vdenorm4, vxmask4));
    const v128_t vf5 = wasm_v128_or(vsign5, wasm_v128_bitselect(vnorm5, vdenorm5, vxmask5));

    wasm_v128_store(output, vf0);
    wasm_v128_store(output + 4, vf1);
    wasm_v128_store(output + 8, vf2);
    wasm_v128_store(output + 12, vf3);
    wasm_v128_store(output + 16, vf4);
    wasm_v128_store(output + 20, vf5);
    output += 24;
  }
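  // Handle any remaining full vectors of 8 elements with the same algorithm.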
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0,  8, 1,  9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);

    const v128_t vf_lo = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));
    const v128_t vf_hi = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));

    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
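  // Tail: 1-7 elements remain. A full vector is still loaded; the
  // out-of-bounds read is sanctioned by the XNN_OOB_READS annotation on
  // this kernel, and only the valid lanes are stored below.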
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint16_t));
    assert(n <= 7 * sizeof(uint16_t));
    const v128_t vh = wasm_v128_load(i);

    const v128_t vzero = wasm_i16x8_const_splat(0);
    const v128_t vw_lo = wasm_v16x8_shuffle(vzero, vh, 0,  8, 1,  9, 2, 10, 3, 11);
    const v128_t vw_hi = wasm_v16x8_shuffle(vzero, vh, 4, 12, 5, 13, 6, 14, 7, 15);

    const v128_t vsign_lo = wasm_v128_and(vw_lo, vsign_mask);
    const v128_t vsign_hi = wasm_v128_and(vw_hi, vsign_mask);

    const v128_t vnonsign_lo = wasm_v128_xor(vw_lo, vsign_lo);
    const v128_t vnonsign_hi = wasm_v128_xor(vw_hi, vsign_hi);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_lo, 3), vexp_offset), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_i32x4_add(wasm_u32x4_shr(vnonsign_hi, 3), vexp_offset), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_lo, 16), vmagic_bias), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v128_or(wasm_u32x4_shr(vnonsign_hi, 16), vmagic_bias), vmagic_bias);

    const v128_t vxmask_lo = wasm_i32x4_gt(vnonsign_lo, vdenorm_cutoff);
    v128_t vf = wasm_v128_or(vsign_lo, wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));

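    // Store the remaining 1-7 floats in chunks of 4, 2, and 1. The f64 lane
    // extract writes two floats at once, and the v64x2 shuffle shifts the
    // upper half of vf down for the following chunk.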
    if (n & (4 * sizeof(uint16_t))) {
      wasm_v128_store(output, vf);
      output += 4;

      const v128_t vxmask_hi = wasm_i32x4_gt(vnonsign_hi, vdenorm_cutoff);
      vf = wasm_v128_or(vsign_hi, wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
    }
    if (n & (2 * sizeof(uint16_t))) {
      *((double*) output) = wasm_f64x2_extract_lane(vf, 0);
      output += 2;

      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (n & (1 * sizeof(uint16_t))) {
      *((float*) output) = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}