// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert SSE in [2, 4]
$assert DATATYPE in ["QS8", "QU8"]
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


$ISA = {2: "sse2", 4: "sse41"}[SSE]
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
void xnn_f32_${DATATYPE.lower()}_vcvt_ukernel__${ISA}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    ${XINT8_T}* y,
    const union xnn_f32_${DATATYPE.lower()}_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128 vscale = _mm_load_ps(params->sse${SSE}.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->sse${SSE}.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse${SSE}.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse${SSE}.output_min);

  $if BATCH_TILE > 8:
    // Main loop: convert ${BATCH_TILE} elements per iteration.
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
      $for N in range(4, BATCH_TILE, 4):
        __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

      // Scale, then clamp at the upper bound (output_max - output_zero_point)
      // before rounding; the lower bound is enforced after packing.
      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vscale);

      $for N in range(0, BATCH_TILE, 4):
        vx${ABC[N:N+4]} = _mm_min_ps(vx${ABC[N:N+4]}, voutput_max_less_zero_point);

      // Convert to int32 with round-to-nearest-even.
      $for N in range(0, BATCH_TILE, 4):
        const __m128i vy${ABC[N:N+4]} = _mm_cvtps_epi32(vx${ABC[N:N+4]});

      // Pack int32 to int16 with signed saturation, then add the zero point.
      $for N in range(0, BATCH_TILE, 8):
        __m128i vy${ABC[N:N+8]} = _mm_packs_epi32(vy${ABC[N:N+4]}, vy${ABC[N+4:N+8]});

      $for N in range(0, BATCH_TILE, 8):
        vy${ABC[N:N+8]} = _mm_adds_epi16(vy${ABC[N:N+8]}, voutput_zero_point);

      $if DATATYPE == "QS8" and SSE < 4:
        // SSE2 has no _mm_max_epi8, so clamp at the lower bound while still in 16 bits.
        $for N in range(0, BATCH_TILE, 8):
          vy${ABC[N:N+8]} = _mm_max_epi16(vy${ABC[N:N+8]}, voutput_min);

      // Pack int16 to the 8-bit output type with saturation.
      $for N in range(0, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          __m128i vy${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vy${ABC[N:N+8]}, vy${ABC[N+8:N+16]});
        $else:
          vy${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vy${ABC[N:N+8]}, vy${ABC[N:N+8]});

      $if DATATYPE == "QU8" or SSE == 4:
        $for N in range(0, BATCH_TILE, 16):
          $if N + 8 < BATCH_TILE:
            vy${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vy${ABC[N:N+16]}, voutput_min);
          $else:
            vy${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vy${ABC[N:N+8]}, voutput_min);

      _mm_storeu_si128((__m128i*) y, vy${ABC[0:16]});
      $for N in range(16, BATCH_TILE, 16):
        $if N + 8 < BATCH_TILE:
          _mm_storeu_si128((__m128i*) (y + ${N}), vy${ABC[N:N+16]});
        $else:
          _mm_storel_epi64((__m128i*) (y + ${N}), vy${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m128 vx_lo = _mm_loadu_ps(x);
    __m128 vx_hi = _mm_loadu_ps(x + 4);
    x += 8;

    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);

    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);

    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);

    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    $if DATATYPE == "QS8" and SSE < 4:
      vy = _mm_max_epi16(vy, voutput_min);
    vy = ${_MM_PACKXS_EPI16}(vy, vy);
    $if DATATYPE == "QU8" or SSE == 4:
      vy = ${_MM_MAX_EPX8}(vy, voutput_min);

    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    // Tail: convert the 1-7 remaining elements. vx_hi may overlap vx_lo, and
    // loading past the end of x is permitted because the kernel is declared
    // XNN_OOB_READS; the extra lanes are never stored.
    __m128 vx_lo = _mm_loadu_ps(x);
    const float* x_hi = (const float*) ((uintptr_t) x + (n & (4 * sizeof(float))));
    __m128 vx_hi = _mm_loadu_ps(x_hi);

    vx_lo = _mm_mul_ps(vx_lo, vscale);
    vx_hi = _mm_mul_ps(vx_hi, vscale);

    vx_lo = _mm_min_ps(vx_lo, voutput_max_less_zero_point);
    vx_hi = _mm_min_ps(vx_hi, voutput_max_less_zero_point);

    const __m128i vy_lo = _mm_cvtps_epi32(vx_lo);
    const __m128i vy_hi = _mm_cvtps_epi32(vx_hi);

    __m128i vy = _mm_packs_epi32(vy_lo, vy_hi);
    vy = _mm_adds_epi16(vy, voutput_zero_point);
    $if DATATYPE == "QS8" and SSE < 4:
      vy = _mm_max_epi16(vy, voutput_min);
    vy = ${_MM_PACKXS_EPI16}(vy, vy);
    $if DATATYPE == "QU8" or SSE == 4:
      vy = ${_MM_MAX_EPX8}(vy, voutput_min);

    if (n & (4 * sizeof(float))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      y += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    $if SSE == 4:
      if (n & (2 * sizeof(float))) {
        unaligned_store_u16(y, (uint16_t) _mm_extract_epi16(vy, 0));
        y += 2;
        vy = _mm_srli_epi32(vy, 16);
      }
      if (n & (1 * sizeof(float))) {
        *y = (${XINT8_T}) _mm_extract_epi8(vy, 0);
      }
    $else:
      // SSE2 has no _mm_extract_epi8, so move the low 32 bits to a general-purpose
      // register and store the last 1-3 bytes from there.
      {
        uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
        if (n & (2 * sizeof(float))) {
          unaligned_store_u16(y, (uint16_t) vy_lo);
          y += 2;
          vy_lo >>= 16;
        }
        if (n & (1 * sizeof(float))) {
          *y = (${XINT8_T}) vy_lo;
        }
      }
  }
}