// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__wasmsimd_int16_x${BATCH_TILE}(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

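  // Convert IEEE half-precision values to single precision using integer SIMD
  // only: strip the sign, rebuild normalized inputs by shifting the exponent
  // and mantissa into single-precision position and rebiasing the exponent,
  // reconstruct denormal inputs with the magic-bias trick, then blend the two
  // results based on a comparison against the denormal cutoff and restore the
  // sign. All constants below come from the params structure.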
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd_int16.sign_mask);
  const v128_t vexp_offset = wasm_v128_load64_splat(params->wasmsimd_int16.exp_offset);
  const v128_t vexp_scale = wasm_v128_load64_splat(params->wasmsimd_int16.exp_scale);
  const v128_t vmagic_mask = wasm_v128_load64_splat(params->wasmsimd_int16.magic_mask);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_int16.magic_bias);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      const v128_t vh0 = wasm_v128_load(i);
      $for N in range(1, SIMD_TILE):
        const v128_t vh${N} = wasm_v128_load(i + ${N * 8});
      i += ${BATCH_TILE};

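      // Split each half-precision value into its sign and its magnitude
      // (exponent and mantissa bits).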
      $for N in range(SIMD_TILE):
        const v128_t vsign${N} = wasm_v128_and(vh${N}, vsign_mask);

      $for N in range(SIMD_TILE):
        const v128_t vnonsign${N} = wasm_v128_xor(vh${N}, vsign${N});

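      // Normalized path: shift the low mantissa bits into the low 16-bit word
      // and the exponent plus high mantissa bits into the high word, adding
      // vexp_offset toward the single-precision exponent bias.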
      $for N in range(SIMD_TILE):
        const v128_t vprenorm${N*2} = wasm_i16x8_shl(vnonsign${N}, 13);
        const v128_t vprenorm${N*2+1} = wasm_i16x8_add(wasm_u16x8_shr(vnonsign${N}, 3), vexp_offset);

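      // Interleave the low and high prenorm words into 32-bit lanes and
      // multiply by vexp_scale to complete the exponent rebias.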
      $for N in range(SIMD_TILE):
        const v128_t vnorm${N*2} = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm${N*2}, vprenorm${N*2+1}, 0,  8, 1,  9, 2, 10, 3, 11), vexp_scale);
        const v128_t vnorm${N*2+1} = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm${N*2}, vprenorm${N*2+1}, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);

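      // Denormal path: pair each magnitude with vmagic_mask to form a small
      // float, then subtract vmagic_bias to recover the denormalized value.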
      $for N in range(SIMD_TILE):
        const v128_t vdenorm${N*2} = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign${N}, vmagic_mask, 0,  8, 1,  9, 2, 10, 3, 11), vmagic_bias);
        const v128_t vdenorm${N*2+1} = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign${N}, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);

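      // Lanes whose magnitude exceeds vdenorm_cutoff hold normalized values
      // (or infinities/NaNs) and take the normalized path.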
      $for N in range(SIMD_TILE):
        const v128_t vmask${N} = wasm_i16x8_gt(vnonsign${N}, vdenorm_cutoff);
      const v128_t vzero = wasm_i16x8_const_splat(0);

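      // Sign-extend the 16-bit comparison results into one 32-bit mask per
      // output lane.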
      $for N in range(SIMD_TILE):
        const v128_t vxmask${N*2} = wasm_i32x4_extend_low_i16x8(vmask${N});
        const v128_t vxmask${N*2+1} = wasm_i32x4_extend_high_i16x8(vmask${N});

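      // Select the normalized or denormal result per lane and merge the sign
      // back into the high half of each 32-bit lane.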
      $for N in range(SIMD_TILE):
        const v128_t vf${N*2} = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign${N}, 0,  8, 1,  9, 2, 10, 3, 11),
          wasm_v128_bitselect(vnorm${N*2}, vdenorm${N*2}, vxmask${N*2}));
        const v128_t vf${N*2+1} = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign${N}, 4, 12, 5, 13, 6, 14, 7, 15),
          wasm_v128_bitselect(vnorm${N*2+1}, vdenorm${N*2+1}, vxmask${N*2+1}));

      wasm_v128_store(output, vf0);
      $for N in range(1, 2*SIMD_TILE):
        wasm_v128_store(output + ${N*4}, vf${N});
      output += ${BATCH_TILE};
    }
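  // Convert 8 half-precision values (one 16-bit vector) per iteration.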
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const v128_t vh = wasm_v128_load(i);
    i += 8;

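    // Extract the sign bit of each element.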
    const v128_t vsign = wasm_v128_and(vh, vsign_mask);

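    // Clear the sign bit, leaving the magnitude (exponent and mantissa).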
    const v128_t vnonsign = wasm_v128_xor(vh, vsign);

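    // Normalized path: shift the low mantissa bits into the low word and the
    // exponent plus high mantissa bits into the high word, adding vexp_offset
    // toward the single-precision exponent bias.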
    const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
    const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);

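    // Interleave the low and high words into 32-bit lanes and multiply by
    // vexp_scale to complete the exponent rebias.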
    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0,  8, 1,  9, 2, 10, 3, 11), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);

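    // Denormal path: pair each magnitude with vmagic_mask to form a small
    // float, then subtract vmagic_bias to recover the denormalized value.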
    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0,  8, 1,  9, 2, 10, 3, 11), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);

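    // Lanes whose magnitude exceeds vdenorm_cutoff are normalized (or Inf/NaN)
    // and take the normalized path.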
    const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
    const v128_t vzero = wasm_i16x8_const_splat(0);

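    // Widen the mask to 32 bits per lane, select the normalized or denormal
    // result, and OR the sign into the top of each lane.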
    const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
    const v128_t vf_lo = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 0,  8, 1,  9, 2, 10, 3, 11),
      wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));

    const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
    const v128_t vf_hi = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15),
      wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));

    wasm_v128_store(output, vf_lo);
    wasm_v128_store(output + 4, vf_hi);
    output += 8;
  }
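  // Process the final 1-7 elements. Loading a full vector here may read past
  // the end of the input buffer, which the XNN_OOB_READS annotation permits.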
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint16_t));
    assert(n <= 7 * sizeof(uint16_t));
    const v128_t vh = wasm_v128_load(i);

    const v128_t vsign = wasm_v128_and(vh, vsign_mask);

    const v128_t vnonsign = wasm_v128_xor(vh, vsign);

    const v128_t vprenorm_lo = wasm_i16x8_shl(vnonsign, 13);
    const v128_t vprenorm_hi = wasm_i16x8_add(wasm_u16x8_shr(vnonsign, 3), vexp_offset);

    const v128_t vnorm_lo = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 0,  8, 1,  9, 2, 10, 3, 11), vexp_scale);
    const v128_t vnorm_hi = wasm_f32x4_mul(wasm_v16x8_shuffle(vprenorm_lo, vprenorm_hi, 4, 12, 5, 13, 6, 14, 7, 15), vexp_scale);

    const v128_t vdenorm_lo = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 0,  8, 1,  9, 2, 10, 3, 11), vmagic_bias);
    const v128_t vdenorm_hi = wasm_f32x4_sub(wasm_v16x8_shuffle(vnonsign, vmagic_mask, 4, 12, 5, 13, 6, 14, 7, 15), vmagic_bias);

    const v128_t vmask = wasm_i16x8_gt(vnonsign, vdenorm_cutoff);
    const v128_t vzero = wasm_i16x8_const_splat(0);

    const v128_t vxmask_lo = wasm_i32x4_extend_low_i16x8(vmask);
    v128_t vf = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 0,  8, 1,  9, 2, 10, 3, 11),
      wasm_v128_bitselect(vnorm_lo, vdenorm_lo, vxmask_lo));

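    // Store 4, then 2, then 1 element(s), selected by the bits of the
    // remaining element count.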
    if (n & (4 * sizeof(uint16_t))) {
      wasm_v128_store(output, vf);
      output += 4;

      const v128_t vxmask_hi = wasm_i32x4_extend_high_i16x8(vmask);
      vf = wasm_v128_or(wasm_v16x8_shuffle(vzero, vsign, 4, 12, 5, 13, 6, 14, 7, 15),
        wasm_v128_bitselect(vnorm_hi, vdenorm_hi, vxmask_hi));
    }
    if (n & (2 * sizeof(uint16_t))) {
      *((double*) output) = wasm_f64x2_extract_lane(vf, 0);
      output += 2;

      vf = wasm_v64x2_shuffle(vf, vf, 1, 1);
    }
    if (n & (1 * sizeof(uint16_t))) {
      *((float*) output) = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}