// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_CVTEPX8_EPI32 = {"QS8": "_mm_cvtepi8_epi32", "QU8": "_mm_cvtepu8_epi32"}[DATATYPE]
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__sse41_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  // Load the negated zero point and the scale factor from the parameters structure.
  const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
  const __m128 vscale = _mm_load_ps(params->sse4.scale);
  $if BATCH_TILE > 4:
    // Main loop: convert ${BATCH_TILE} elements per iteration.
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      __m128i vx${ABC[0:4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
      $for N in range(4, BATCH_TILE, 4):
        __m128i vx${ABC[N:N+4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x + ${N})));
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = _mm_add_epi32(vx${ABC[N:N+4]}, vminus_zero_point);

      $for N in range(0, BATCH_TILE, 4):
        __m128 vy${ABC[N:N+4]} = _mm_cvtepi32_ps(vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vscale);

      _mm_storeu_ps(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
  // Secondary loop: convert 4 elements per iteration.
  for (; n >= 4 * sizeof(${XINT8_T}); n -= 4 * sizeof(${XINT8_T})) {
    __m128i vx = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);
    x += 4;

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);

    _mm_storeu_ps(y, vy);
    y += 4;
  }
  if XNN_UNLIKELY(n != 0) {
    // Tail: convert the final 1-3 elements. The 4-byte load may read past the
    // end of the input; this is why the kernel is declared XNN_OOB_READS.
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 3 * sizeof(${XINT8_T}));

    __m128i vx = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(x)));
    vx = _mm_add_epi32(vx, vminus_zero_point);

    __m128 vy = _mm_cvtepi32_ps(vx);
    vy = _mm_mul_ps(vy, vscale);

    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      _mm_store_ss(y, vy);
    }
  }
}