// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_CVTEPX8_EPI32 = {"QS8": "_mm_cvtepi8_epi32", "QU8": "_mm_cvtepu8_epi32"}[DATATYPE]
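// SSE2 micro-kernel converting ${DATATYPE} inputs to F32 via an
// unpack-with-magic-exponent trick (no SSE4.1 widening conversions needed).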
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);
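  // All constants are loaded pre-broadcast from the params structure:
  // sign_mask flips int8 inputs into a biased-unsigned encoding (QS8 only),
  // magic_exp and magic_bias drive the integer-to-float conversion below, and
  // scale is the dequantization scale.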
  $if DATATYPE == "QS8":
    const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  $if BATCH_TILE > 8:
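    // Main loop: convert ${BATCH_TILE} elements per iteration.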
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      __m128i vx${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) x);
      $for N in range(8, BATCH_TILE, 8):
        __m128i vx${ABC[N:N+8]} = _mm_loadl_epi64((const __m128i*) (x + ${N}));
      x += ${BATCH_TILE};

      $if DATATYPE == "QS8":
        $for N in range(0, BATCH_TILE, 8):
          vx${ABC[N:N+8]} = _mm_xor_si128(vx${ABC[N:N+8]}, vsign_mask);
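      // Zero-extend the bytes to 16-bit lanes.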
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = _mm_unpacklo_epi8(vx${ABC[N:N+8]}, vzero);
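      // Interleave each 16-bit value with the magic exponent half-word: every
      // 32-bit lane then holds the IEEE-754 bit pattern of (magic constant + x).
      // Subtracting vmagic_bias removes the magic constant (along with the zero
      // point presumably folded into it at init time), and multiplying by
      // vscale completes the dequantization.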
      $for N in range(0, BATCH_TILE, 8):
        __m128 vy${ABC[N:N+4]} = _mm_castsi128_ps(_mm_unpacklo_epi16(vx${ABC[N:N+8]}, vmagic_exp));
        __m128 vy${ABC[N+4:N+8]} = _mm_castsi128_ps(_mm_unpackhi_epi16(vx${ABC[N:N+8]}, vmagic_exp));

      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = _mm_sub_ps(vy${ABC[N:N+4]}, vmagic_bias);

      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vscale);

      _mm_storeu_ps(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
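  // Convert 8 elements at a time until fewer than 8 remain.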
  for (; n >= 8 * sizeof(${XINT8_T}); n -= 8 * sizeof(${XINT8_T})) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    $if DATATYPE == "QS8":
      vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
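  // Tail: 1 to 7 elements remain. The full 8-byte load may read past the end
  // of x; the XNN_OOB_READS annotation on this kernel permits that.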
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 7 * sizeof(${XINT8_T}));

    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    $if DATATYPE == "QS8":
      vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);
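    // Store 4, 2, and finally 1 element, according to the set bits of n.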
    if (n & (4 * sizeof(${XINT8_T}))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      _mm_store_ss(y, vy);
    }
  }
}