// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vlrelu_ukernel__ssse3_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    ${XINT8_T}* y,
    const union xnn_${DATATYPE.lower()}_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  $if DATATYPE == "QU8":
    const __m128i vzero = _mm_setzero_si128();
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      const __m128i vx${ABC[0]} = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        const __m128i vx${ABC[N]} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};

      // Widen the 8-bit inputs to 16 bits (zero-extension for QU8, sign-extension for QS8).
      $for N in range(SIMD_TILE):
        $if DATATYPE == "QU8":
          __m128i vacc${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vzero);
          __m128i vacc${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vzero);
        $else:
          const __m128i vm${ABC[N]} = _mm_cmpgt_epi8(_mm_setzero_si128(), vx${ABC[N]});
          __m128i vacc${ABC[2*N]} = _mm_unpacklo_epi8(vx${ABC[N]}, vm${ABC[N]});
          __m128i vacc${ABC[2*N+1]} = _mm_unpackhi_epi8(vx${ABC[N]}, vm${ABC[N]});

      // Compute the per-lane "x > input_zero_point" mask and negate the input around the input zero point.
      $for N in range(2*SIMD_TILE):
        __m128i vmultiplier${ABC[N]} = _mm_cmpgt_epi16(vacc${ABC[N]}, vinput_zero_point);
        vacc${ABC[N]} = _mm_sub_epi16(vinput_zero_point, vacc${ABC[N]});

      // Select the slope multiplier for each lane from the mask and pre-scale the input by 1 << 7.
      $for N in range(2*SIMD_TILE):
        vmultiplier${ABC[N]} = _mm_and_si128(vmultiplier${ABC[N]}, vmultiplier_diff);
        vacc${ABC[N]} = _mm_slli_epi16(vacc${ABC[N]}, 7);
        vmultiplier${ABC[N]} = _mm_xor_si128(vmultiplier${ABC[N]}, vmultiplier_base);

      // Rounding Q15 multiplication: (vacc * vmultiplier + 0x4000) >> 15.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_mulhrs_epi16(vacc${ABC[N]}, vmultiplier${ABC[N]});

      // Add the output zero point with signed saturation.
      $for N in range(2*SIMD_TILE):
        vacc${ABC[N]} = _mm_adds_epi16(vacc${ABC[N]}, voutput_zero_point);

      // Pack back to 8 bits with saturation.
      $for N in range(SIMD_TILE):
        const __m128i vy${ABC[N]} = ${_MM_PACKXS_EPI16}(vacc${ABC[2*N]}, vacc${ABC[2*N+1]});

      _mm_storeu_si128((__m128i*) y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    $if DATATYPE == "QU8":
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    const __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // Handle the final 1..15 elements; the 16-byte load may read past the end of the input (hence XNN_OOB_READS).
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    $if DATATYPE == "QU8":
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    $else:
      const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
      __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
      __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    // Store only the low n bytes of the packed result.
    __m128i vy = ${_MM_PACKXS_EPI16}(vacc_lo, vacc_hi);
    if (n & (8 * sizeof(${XINT8_T}))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(${XINT8_T}))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(${XINT8_T}))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      *y = (${XINT8_T}) vy_lo;
    }
  }
}