// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$WASM_X16X8_LOAD8X8 = {"QS8": "wasm_i16x8_load8x8", "QU8": "wasm_u16x8_load8x8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__wasmsimd_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

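  // The conversion computes y = (x - zero_point) * scale. The zero point is
  // stored pre-negated in params, so it can be applied with a single 16-bit
  // addition below.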
  const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
  const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
  $if BATCH_TILE > 8:
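    // Main loop: convert ${BATCH_TILE} elements per iteration.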
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
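      // Load ${BATCH_TILE} 8-bit elements, widened to 16-bit lanes
      // (sign-extended for QS8, zero-extended for QU8).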
      v128_t vx${ABC[0:8]} = ${WASM_X16X8_LOAD8X8}(x);
      $for N in range(8, BATCH_TILE, 8):
        v128_t vx${ABC[N:N+8]} = ${WASM_X16X8_LOAD8X8}(x + ${N});
      x += ${BATCH_TILE};

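      // Subtract the zero point (by adding its negation) in 16-bit arithmetic.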
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = wasm_i16x8_add(vx${ABC[N:N+8]}, vminus_zero_point);

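      // Sign-extend the 16-bit values to 32 bits; after zero-point
      // subtraction they may be negative even for QU8.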
      $for N in range(0, BATCH_TILE, 8):
        v128_t vy${ABC[N:N+4]} = wasm_i32x4_extend_low_i16x8(vx${ABC[N:N+8]});
        v128_t vy${ABC[N+4:N+8]} = wasm_i32x4_extend_high_i16x8(vx${ABC[N:N+8]});

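      // Convert the 32-bit integers to single-precision floats.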
      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = wasm_f32x4_convert_i32x4(vy${ABC[N:N+4]});

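      // Apply the scale factor.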
      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = wasm_f32x4_mul(vy${ABC[N:N+4]}, vscale);

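      // Store ${BATCH_TILE} converted floats.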
      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
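  // Loop over groups of 8 elements (this is the main loop when BATCH_TILE == 8).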
  for (; n >= 8 * sizeof(${XINT8_T}); n -= 8 * sizeof(${XINT8_T})) {
    v128_t vx = ${WASM_X16X8_LOAD8X8}(x);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    x += 8;

    v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
    v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);

    vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
    vy_hi = wasm_f32x4_convert_i32x4(vy_hi);

    vy_lo = wasm_f32x4_mul(vy_lo, vscale);
    vy_hi = wasm_f32x4_mul(vy_hi, vscale);

    wasm_v128_store(y, vy_lo);
    wasm_v128_store(y + 4, vy_hi);
    y += 8;
  }
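  // Handle the final 1..7 elements. The 8-byte load may read past the end of
  // the input buffer; the XNN_OOB_READS annotation on the function declares
  // this out-of-bounds read as intentional.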
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 7 * sizeof(${XINT8_T}));

    v128_t vx = ${WASM_X16X8_LOAD8X8}(x);
    vx = wasm_i16x8_add(vx, vminus_zero_point);
    x += 8;

    v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
    vy = wasm_f32x4_convert_i32x4(vy);
    vy = wasm_f32x4_mul(vy, vscale);

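    // Store the tail in chunks of 4, 2, and 1 elements, refilling or shifting
    // vy between the partial stores.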
    if (n & (4 * sizeof(${XINT8_T}))) {
      wasm_v128_store(y, vy); y += 4;
      vy = wasm_i32x4_extend_high_i16x8(vx);
      vy = wasm_f32x4_convert_i32x4(vy);
      vy = wasm_f32x4_mul(vy, vscale);
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}