// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__wasmsimd_x${BATCH_TILE}(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

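  // Constants from the conversion parameters: the exponent bias, the scale_to_inf/scale_to_zero
  // rescaling factors, the masks for the FP16 exponent and mantissa fields, and the canonical
  // FP16 NaN pattern.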
  const v128_t vexp_bias = wasm_v128_load64_splat(params->wasmsimd.exp_bias);
  const v128_t vscale_to_inf = wasm_v128_load64_splat(params->wasmsimd.scale_to_inf);
  const v128_t vexpw_max = wasm_v128_load64_splat(params->wasmsimd.expw_max);
  const v128_t vscale_to_zero = wasm_v128_load64_splat(params->wasmsimd.scale_to_zero);
  const v128_t vbias_min = wasm_v128_load64_splat(params->wasmsimd.bias_min);
  const v128_t vmanth_mask = wasm_v128_load64_splat(params->wasmsimd.manth_mask);
  const v128_t vexph_mask = wasm_v128_load64_splat(params->wasmsimd.exph_mask);
  const v128_t vnanh = wasm_v128_load64_splat(params->wasmsimd.nanh);

  uint16_t* o = (uint16_t*) output;
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx0 = wasm_v128_load(input);
      $for N in range(1, 2*SIMD_TILE):
        const v128_t vx${N} = wasm_v128_load(input + ${N * 4});
      input += ${BATCH_TILE};

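      // Split each input into its magnitude and its sign: vabsx = |x|, and vsignx = x ^ |x|
      // keeps only the sign bit of each lane.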
      $for N in range(2*SIMD_TILE):
        const v128_t vabsx${N} = wasm_f32x4_abs(vx${N});

      $for N in range(2*SIMD_TILE):
        const v128_t vsignx${N} = wasm_v128_xor(vx${N}, vabsx${N});

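      // Bias the exponent of |x| by exp_bias (integer add), rescale the magnitude by
      // scale_to_inf and then by scale_to_zero, and flag NaN lanes (|x| compares greater than
      // the FP32 exponent-max pattern as a signed integer) in vnanmaskw.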
      $for N in range(2*SIMD_TILE):
        v128_t vbias${N} = wasm_i32x4_add(vabsx${N}, vexp_bias);

      $for N in range(2*SIMD_TILE):
        v128_t vf${N} = wasm_f32x4_mul(vabsx${N}, vscale_to_inf);

      $for N in range(2*SIMD_TILE):
        const v128_t vnanmaskw${N} = wasm_i32x4_gt(vabsx${N}, vexpw_max);

      $for N in range(2*SIMD_TILE):
        vbias${N} = wasm_v128_and(vbias${N}, vexpw_max);

      $for N in range(2*SIMD_TILE):
        vf${N} = wasm_f32x4_mul(vf${N}, vscale_to_zero);

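      // Narrow the 32-bit NaN masks and sign words to 16 bits, combining pairs of vectors.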
      $for N in range(SIMD_TILE):
        const v128_t vnanmaskh${N} = wasm_i16x8_narrow_i32x4(vnanmaskw${2*N}, vnanmaskw${2*N+1});

      $for N in range(SIMD_TILE):
        const v128_t vsignh${N} = wasm_i16x8_narrow_i32x4(vsignx${2*N}, vsignx${2*N+1});

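      // Clamp the bias from below (16-bit max against bias_min) and add it to the rescaled
      // magnitude; after this floating-point add the low bits of vf hold the rounded FP16
      // exponent and mantissa.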
      $for N in range(2*SIMD_TILE):
        vbias${N} = wasm_i16x8_max(vbias${N}, vbias_min);

      $for N in range(2*SIMD_TILE):
        vf${N} = wasm_f32x4_add(vf${N}, vbias${N});

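      // Extract the FP16 exponent field (shift right by 13, mask with exph_mask) and the
      // mantissa field (mask with manth_mask), then recombine them into the non-sign bits.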
      $for N in range(2*SIMD_TILE):
        v128_t vexpw${N} = wasm_i32x4_shr(vf${N}, 13);

      $for N in range(2*SIMD_TILE):
        const v128_t vmantw${N} = wasm_v128_and(vf${N}, vmanth_mask);

      $for N in range(2*SIMD_TILE):
        vexpw${N} = wasm_v128_and(vexpw${N}, vexph_mask);

      $for N in range(2*SIMD_TILE):
        const v128_t vnonsignw${N} = wasm_i32x4_add(vmantw${N}, vexpw${N});

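      // Narrow the 32-bit results to 16 bits, substitute the canonical FP16 NaN in NaN lanes,
      // and restore the sign bits.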
      $for N in range(SIMD_TILE):
        const v128_t vnonsignh${N} = wasm_i16x8_narrow_i32x4(vnonsignw${2*N}, vnonsignw${2*N+1});

      $for N in range(SIMD_TILE):
        const v128_t vabsh${N} = wasm_v128_bitselect(vnanh, vnonsignh${N}, vnanmaskh${N});

      $for N in range(SIMD_TILE):
        const v128_t vh${N} = wasm_v128_or(vabsh${N}, vsignh${N});

      wasm_v128_store(o, vh0);
      $for N in range(1, SIMD_TILE):
        wasm_v128_store(o + ${N * 8}, vh${N});
      o += ${BATCH_TILE};
    }
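  // Convert the remaining full groups of 8 elements, two 4-lane vectors per iteration.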
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx_lo = wasm_v128_load(input);
    const v128_t vx_hi = wasm_v128_load(input + 4);
    input += 8;

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    const v128_t vh = wasm_v128_or(vabsh, vsignh);

    wasm_v128_store(o, vh);
    o += 8;
  }
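  // Handle a tail of 1 to 7 remaining elements. The vector loads may read past the end of the
  // input (the kernel is annotated XNN_OOB_READS); the final stores write only the converted
  // elements.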
  if XNN_UNPREDICTABLE(n != 0) {
    const v128_t vx_lo = wasm_v128_load(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const v128_t vx_hi = wasm_v128_load(input_hi);

    const v128_t vabsx_lo = wasm_f32x4_abs(vx_lo);
    const v128_t vabsx_hi = wasm_f32x4_abs(vx_hi);

    const v128_t vsignx_lo = wasm_v128_xor(vx_lo, vabsx_lo);
    const v128_t vsignx_hi = wasm_v128_xor(vx_hi, vabsx_hi);
    v128_t vbias_lo = wasm_i32x4_add(vabsx_lo, vexp_bias);
    v128_t vbias_hi = wasm_i32x4_add(vabsx_hi, vexp_bias);
    v128_t vf_lo = wasm_f32x4_mul(vabsx_lo, vscale_to_inf);
    v128_t vf_hi = wasm_f32x4_mul(vabsx_hi, vscale_to_inf);
    const v128_t vnanmaskw_lo = wasm_i32x4_gt(vabsx_lo, vexpw_max);
    const v128_t vnanmaskw_hi = wasm_i32x4_gt(vabsx_hi, vexpw_max);

    vbias_lo = wasm_v128_and(vbias_lo, vexpw_max);
    vbias_hi = wasm_v128_and(vbias_hi, vexpw_max);
    vf_lo = wasm_f32x4_mul(vf_lo, vscale_to_zero);
    vf_hi = wasm_f32x4_mul(vf_hi, vscale_to_zero);
    const v128_t vnanmaskh = wasm_i16x8_narrow_i32x4(vnanmaskw_lo, vnanmaskw_hi);
    const v128_t vsignh = wasm_i16x8_narrow_i32x4(vsignx_lo, vsignx_hi);

    vbias_lo = wasm_i16x8_max(vbias_lo, vbias_min);
    vbias_hi = wasm_i16x8_max(vbias_hi, vbias_min);

    vf_lo = wasm_f32x4_add(vf_lo, vbias_lo);
    vf_hi = wasm_f32x4_add(vf_hi, vbias_hi);

    v128_t vexpw_lo = wasm_i32x4_shr(vf_lo, 13);
    v128_t vexpw_hi = wasm_i32x4_shr(vf_hi, 13);
    const v128_t vmantw_lo = wasm_v128_and(vf_lo, vmanth_mask);
    const v128_t vmantw_hi = wasm_v128_and(vf_hi, vmanth_mask);

    vexpw_lo = wasm_v128_and(vexpw_lo, vexph_mask);
    vexpw_hi = wasm_v128_and(vexpw_hi, vexph_mask);

    const v128_t vnonsignw_lo = wasm_i32x4_add(vmantw_lo, vexpw_lo);
    const v128_t vnonsignw_hi = wasm_i32x4_add(vmantw_hi, vexpw_hi);

    const v128_t vnonsignh = wasm_i16x8_narrow_i32x4(vnonsignw_lo, vnonsignw_hi);

    const v128_t vabsh = wasm_v128_bitselect(vnanh, vnonsignh, vnanmaskh);

    v128_t vh = wasm_v128_or(vabsh, vsignh);

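    // Store 4, 2, and/or 1 remaining halfwords as indicated by the element count encoded in n.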
    if (n & (4 * sizeof(float))) {
      *((double*) o) = wasm_f64x2_extract_lane(vh, 0);
      vh = wasm_v64x2_shuffle(vh, vh, 1, 1);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      *((float*) o) = (float) wasm_f32x4_extract_lane(vh, 0);
      vh = wasm_i64x2_shr(vh, 32);
      o += 2;
    }
    const uint32_t vh_lo = wasm_i32x4_extract_lane(vh, 0);
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) vh_lo;
    }
  }
}