// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

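// Template for the AVX2 QS8/QU8 -> F32 convert microkernels: each 8-bit
// quantized input is widened to 32 bits, offset by the negated zero point,
// converted to float, and multiplied by the scale, i.e.
// y = (x + minus_zero_point) * scale == (x - zero_point) * scale.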
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM256_CVTEPX8_EPI32 = {"QS8": "_mm256_cvtepi8_epi32", "QU8": "_mm256_cvtepu8_epi32"}[DATATYPE]
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__avx2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
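  // Main loop: convert ${BATCH_TILE} elements per iteration.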
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    __m256i vx${ABC[0:8]} = ${_MM256_CVTEPX8_EPI32}(_mm_loadl_epi64((const __m128i*) x));
    $for N in range(8, BATCH_TILE, 8):
      __m256i vx${ABC[N:N+8]} = ${_MM256_CVTEPX8_EPI32}(_mm_loadl_epi64((const __m128i*) (x + ${N})));
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 8):
      vx${ABC[N:N+8]} = _mm256_add_epi32(vx${ABC[N:N+8]}, vminus_zero_point);

    $for N in range(0, BATCH_TILE, 8):
      __m256 vy${ABC[N:N+8]} = _mm256_cvtepi32_ps(vx${ABC[N:N+8]});

    $for N in range(0, BATCH_TILE, 8):
      vy${ABC[N:N+8]} = _mm256_mul_ps(vy${ABC[N:N+8]}, vscale);

    _mm256_storeu_ps(y, vy${ABC[0:8]});
    $for N in range(8, BATCH_TILE, 8):
      _mm256_storeu_ps(y + ${N}, vy${ABC[N:N+8]});
    y += ${BATCH_TILE};
  }
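  // Tail loop: convert 8 elements per iteration.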
  for (; n >= 8 * sizeof(${XINT8_T}); n -= 8 * sizeof(${XINT8_T})) {
    __m256i vx = ${_MM256_CVTEPX8_EPI32}(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    x += 8;

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
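  // Remainder of 1 to 7 elements: the 8-byte load reads past the end of the
  // input buffer, which is permitted because the kernel is declared
  // XNN_OOB_READS; exactly n floats are then written via 4-, 2-, and
  // 1-element partial stores.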
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 7 * sizeof(${XINT8_T}));

    __m256i vx = ${_MM256_CVTEPX8_EPI32}(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(${XINT8_T}))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}