// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vadd.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM512_CVTEPX8_EPI32 = {"QS8": "_mm512_cvtepi8_epi32", "QU8": "_mm512_cvtepu8_epi32"}[DATATYPE]
$_MM256_PACKXS_EPI16 = {"QS8": "_mm256_packs_epi16", "QU8": "_mm256_packus_epi16"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM256_MIN_EPX8 = {"QS8": "_mm256_min_epi8", "QU8": "_mm256_min_epu8"}[DATATYPE]
$_MM256_MAX_EPX8 = {"QS8": "_mm256_max_epi8", "QU8": "_mm256_max_epu8"}[DATATYPE]
$_MM_MIN_EPX8 = {"QS8": "_mm_min_epi8", "QU8": "_mm_min_epu8"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__avx512skx_mul32_ld128_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  const __m512i va_multiplier = _mm512_load_si512(params->avx512.a_multiplier);
  const __m128i vshift = _mm_load_si128((const __m128i*) params->avx512.shift);
  $if BATCH_TILE > 16:
    const __m512i voutput_zero_point = _mm512_load_si512(params->avx512.output_zero_point);
    const __m256i voutput_min = _mm256_load_si256((const __m256i*) params->avx512.output_min);
    const __m256i voutput_max = _mm256_load_si256((const __m256i*) params->avx512.output_max);
  $else:
    const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx512.output_zero_point);
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx512.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx512.output_max);

  // The addend *input_b is constant across the batch, so fold b_multiplier * (*input_b) into the bias upfront.
  const __m512i vbias = _mm512_add_epi32(
    _mm512_broadcastd_epi32(_mm_cvtsi32_si128(params->avx512.b_multiplier[0] * (int32_t) *input_b)),
    _mm512_load_si512(params->avx512.bias));
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
    const __m512i va${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) input_a));
    $for N in range(16, BATCH_TILE, 16):
      const __m512i va${ABC[N:N+16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) (input_a + ${N})));
    input_a += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      __m512i vacc${ABC[N:N+16]} = _mm512_add_epi32(vbias, _mm512_mullo_epi32(va${ABC[N:N+16]}, va_multiplier));

    // Arithmetic right shift completes the fixed-point requantization.
    $for N in range(0, BATCH_TILE, 16):
      vacc${ABC[N:N+16]} = _mm512_sra_epi32(vacc${ABC[N:N+16]}, vshift);

    // Pack to 16 bits with signed saturation and add the output zero point.
    $for N in range(0, BATCH_TILE, 32):
      $if N + 16 < BATCH_TILE:
        __m512i vout${ABC[N:N+4]}${ABC[N+16:N+20]}${ABC[N+4:N+8]}${ABC[N+20:N+24]}${ABC[N+8:N+12]}${ABC[N+24:N+28]}${ABC[N+12:N+16]}${ABC[N+28:N+32]} = _mm512_adds_epi16(_mm512_packs_epi32(vacc${ABC[N:N+16]}, vacc${ABC[N+16:N+32]}), voutput_zero_point);
      $elif BATCH_TILE > 16:
        __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc${ABC[N:N+16]}), _mm512_extracti32x8_epi32(vacc${ABC[N:N+16]}, 1)), _mm512_castsi512_si256(voutput_zero_point));
      $else:
        __m256i vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]} = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc${ABC[N:N+16]}), _mm512_extracti32x8_epi32(vacc${ABC[N:N+16]}, 1)), voutput_zero_point);

    // Pack to 8 bits with saturation; packing interleaves 128-bit lanes, so a permute/shuffle restores element order.
    $for N in range(0, BATCH_TILE, 32):
      $if N + 16 < BATCH_TILE:
        __m256i vout${ABC[N:N+32]} = _mm256_permutevar8x32_epi32(${_MM256_PACKXS_EPI16}(_mm512_castsi512_si256(vout${ABC[N:N+4]}${ABC[N+16:N+20]}${ABC[N+4:N+8]}${ABC[N+20:N+24]}${ABC[N+8:N+12]}${ABC[N+24:N+28]}${ABC[N+12:N+16]}${ABC[N+28:N+32]}), _mm512_extracti32x8_epi32(vout${ABC[N:N+4]}${ABC[N+16:N+20]}${ABC[N+4:N+8]}${ABC[N+20:N+24]}${ABC[N+8:N+12]}${ABC[N+24:N+28]}${ABC[N+12:N+16]}${ABC[N+28:N+32]}, 1)), _mm256_set_epi32(7, 3, 5, 1, 6, 2, 4, 0));
      $else:
        __m128i vout${ABC[N:N+16]} = _mm_shuffle_epi32(${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}), _mm256_extracti128_si256(vout${ABC[N:N+4]}${ABC[N+8:N+12]}${ABC[N+4:N+8]}${ABC[N+12:N+16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));

    // Clamp to the [output_min, output_max] range.
    $for N in range(0, BATCH_TILE, 32):
      $if N + 16 < BATCH_TILE:
        vout${ABC[N:N+32]} = ${_MM256_MAX_EPX8}(vout${ABC[N:N+32]}, voutput_min);
      $elif BATCH_TILE > 16:
        vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, _mm256_castsi256_si128(voutput_min));
      $else:
        vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, voutput_min);

    $for N in range(0, BATCH_TILE, 32):
      $if N + 16 < BATCH_TILE:
        vout${ABC[N:N+32]} = ${_MM256_MIN_EPX8}(vout${ABC[N:N+32]}, voutput_max);
      $elif BATCH_TILE > 16:
        vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, _mm256_castsi256_si128(voutput_max));
      $else:
        vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, voutput_max);

    $if BATCH_TILE >= 32:
      _mm256_storeu_si256((__m256i*) output, vout${ABC[0:32]});
    $else:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $for N in range(32, BATCH_TILE, 32):
      $if N + 16 < BATCH_TILE:
        _mm256_storeu_si256((__m256i*) (output + ${N}), vout${ABC[N:N+32]});
      $else:
        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
    output += ${BATCH_TILE};
  }
  // Remainder path: process up to 16 elements per iteration, with a masked store for the final partial vector.
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 16 else ""}{
      $if BATCH_TILE > 16:
        const __m512i va${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) input_a));
        input_a += 16;
      $else:
        const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << n) - UINT32_C(1)));
        const __m512i va${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_maskz_loadu_epi8(vmask, input_a));

      __m512i vacc${ABC[0:16]} = _mm512_add_epi32(vbias, _mm512_mullo_epi32(va${ABC[0:16]}, va_multiplier));

      vacc${ABC[0:16]} = _mm512_sra_epi32(vacc${ABC[0:16]}, vshift);

      $if BATCH_TILE > 16:
        __m256i vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]} = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc${ABC[0:16]}), _mm512_extracti32x8_epi32(vacc${ABC[0:16]}, 1)), _mm512_castsi512_si256(voutput_zero_point));
      $else:
        __m256i vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]} = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc${ABC[0:16]}), _mm512_extracti32x8_epi32(vacc${ABC[0:16]}, 1)), voutput_zero_point);
      __m128i vout${ABC[0:16]} = _mm_shuffle_epi32(${_MM_PACKXS_EPI16}(_mm256_castsi256_si128(vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]}), _mm256_extracti128_si256(vout${ABC[0:4]}${ABC[8:12]}${ABC[4:8]}${ABC[12:16]}, 1)), _MM_SHUFFLE(3, 1, 2, 0));
      $if BATCH_TILE > 16:
        vout${ABC[0:16]} = ${_MM_MAX_EPX8}(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_min));
        vout${ABC[0:16]} = ${_MM_MIN_EPX8}(vout${ABC[0:16]}, _mm256_castsi256_si128(voutput_max));
      $else:
        vout${ABC[0:16]} = ${_MM_MAX_EPX8}(vout${ABC[0:16]}, voutput_min);
        vout${ABC[0:16]} = ${_MM_MIN_EPX8}(vout${ABC[0:16]}, voutput_max);

      $if BATCH_TILE > 16:
        if XNN_LIKELY(n >= (16 * sizeof(${XINT8_T}))) {
          _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
          output += 16;
          n -= 16 * sizeof(${XINT8_T});
        } else {
          const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << n) - UINT32_C(1)));
          _mm_mask_storeu_epi8(output, vmask, vout${ABC[0:16]});
          n = 0;
        }
      $else:
        _mm_mask_storeu_epi8(output, vmask, vout${ABC[0:16]});
    }${" while (n != 0);" if BATCH_TILE > 16 else ""}
  }
}
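
// A scalar sketch of the per-element math the vector code above implements
// (illustrative only, not part of the generated kernel: sat_s16, sat_x8, and
// clamp are hypothetical helpers, and the params element accesses assume the
// array layouts implied by the vector loads above):
//
//   int32_t vacc = params->avx512.bias[0]
//                + params->avx512.b_multiplier[0] * (int32_t) *input_b
//                + params->avx512.a_multiplier[0] * (int32_t) input_a[i];
//   vacc >>= params->avx512.shift[0];                          // arithmetic shift: _mm512_sra_epi32
//   int16_t vout16 = sat_s16(vacc);                            // _mm256_packs_epi32 / _mm512_packs_epi32
//   vout16 = sat_s16((int32_t) vout16 + output_zero_point);    // adds_epi16
//   ${XINT8_T} vout = sat_x8(vout16);                          // packs (QS8) / packus (QU8) to 8 bits
//   output[i] = clamp(vout, output_min, output_max);           // min/max epi8 (QS8) / epu8 (QU8)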