// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["QS8", "QU8"]
$assert SSE == 4
$assert not XOP or AVX
$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
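// Template parameters (expanded by XNNPACK's xngen tool):
//   DATATYPE    - "QS8" (signed) or "QU8" (unsigned) 8-bit quantized elements.
//   BATCH_TILE  - elements processed per main-loop iteration; a multiple of 8.
//   SSE/AVX/XOP - target ISA; SSE 4.1 is the baseline, and XOP implies AVX.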
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  #include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>


$PARAMS_STRUCT = {"QS8": "sse4_mul32", "QU8": "sse4"}[DATATYPE]
$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM_CVTEPX8_EPI32 = {"QS8": "_mm_cvtepi8_epi32", "QU8": "_mm_cvtepu8_epi32"}[DATATYPE]
$_MM_PACKXS_EPI16 = {"QS8": "_mm_packs_epi16", "QU8": "_mm_packus_epi16"}[DATATYPE]
$_MM_MIN_EPX8 = {"QS8": "_mm_min_epi8", "QU8": "_mm_min_epu8"}[DATATYPE]
$_MM_MAX_EPX8 = {"QS8": "_mm_max_epi8", "QU8": "_mm_max_epu8"}[DATATYPE]
$ISA = "xop" if XOP else "avx" if AVX else {4: "sse41"}[SSE]
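// This vaddc microkernel adds the single quantized scalar *input_b to every
// element of input_a, requantizes the sums, and clamps them to
// [output_min, output_max]. For example, DATATYPE="QS8", BATCH_TILE=16, SSE=4
// (no AVX/XOP) expands to xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16.
// A sketch of a direct call, assuming a params union already filled in by the
// matching xnn_init_*_add_minmax_*_params initializer (argument names here are
// illustrative, and n is a byte count):
//   xnn_qs8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16(
//       batch_size * sizeof(int8_t), a, &b, y, &params);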
void xnn_${DATATYPE.lower()}_vaddc_minmax_ukernel__${ISA}_mul32_ld32_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* input_a,
    const ${XINT8_T}* input_b,
    ${XINT8_T}* output,
    const union xnn_${DATATYPE.lower()}_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
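  // Load the precomputed requantization constants: the input_a multiplier, the
  // arithmetic right-shift amount, the output zero point, and the clamping bounds.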
  const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.a_multiplier);
  const __m128i vshift = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_max);

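  // Because input_b is a single broadcast scalar, its contribution is folded
  // into the bias once, outside the loop: every lane of vbias holds
  // bias + b_multiplier * (*input_b).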
  __m128i vbias = _mm_cvtsi32_si128(params->${PARAMS_STRUCT}.b_multiplier[0] * (int32_t) *input_b);
  vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
  vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.bias));
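  // Main loop: process BATCH_TILE elements per iteration.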
  for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
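    // Load BATCH_TILE 8-bit elements, widening four at a time to 32-bit lanes
    // (sign-extended for QS8, zero-extended for QU8).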
    const __m128i va${ABC[0:4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
    $for N in range(4, BATCH_TILE, 4):
      const __m128i va${ABC[N:N+4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + ${N})));
    input_a += ${BATCH_TILE};

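    // acc = va * a_multiplier + vbias: XOP provides a fused 32-bit multiply-add
    // (_mm_macc_epi32); otherwise use SSE4.1 _mm_mullo_epi32 plus _mm_add_epi32.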
    $if XOP:
      $for N in range(0, BATCH_TILE, 4):
        __m128i vacc${ABC[N:N+4]} = _mm_macc_epi32(va${ABC[N:N+4]}, va_multiplier, vbias);
    $else:
      $for N in range(0, BATCH_TILE, 4):
        __m128i vacc${ABC[N:N+4]} = _mm_add_epi32(vbias, _mm_mullo_epi32(va${ABC[N:N+4]}, va_multiplier));

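    // Finish the fixed-point requantization with an arithmetic right shift.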
    $for N in range(0, BATCH_TILE, 4):
      vacc${ABC[N:N+4]} = _mm_sra_epi32(vacc${ABC[N:N+4]}, vshift);

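    // Narrow to 16 bits with signed saturation and add the output zero point,
    // again with saturation.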
    $for N in range(0, BATCH_TILE, 8):
      const __m128i vout${ABC[N:N+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[N:N+4]}, vacc${ABC[N+4:N+8]}), voutput_zero_point);

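    // Narrow to 8 bits with saturation: signed for QS8, unsigned for QU8. A full
    // 16-element group fills one XMM register; a trailing 8-element group packs
    // its own halves together.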
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        __m128i vout${ABC[N:N+16]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N+8:N+16]});
      $else:
        __m128i vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_PACKXS_EPI16}(vout${ABC[N:N+8]}, vout${ABC[N:N+8]});

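    // Clamp to the caller-specified [output_min, output_max] range.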
    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+16]}, voutput_min);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MAX_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_min);

    $for N in range(0, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        vout${ABC[N:N+16]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+16]}, voutput_max);
      $else:
        vout${ABC[N:N+8]}${ABC[N:N+8]} = ${_MM_MIN_EPX8}(vout${ABC[N:N+8]}${ABC[N:N+8]}, voutput_max);

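    // Store 16-element groups with full 16-byte stores and a trailing 8-element
    // group with an 8-byte store.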
    $if BATCH_TILE >= 16:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for N in range(16, BATCH_TILE, 16):
      $if N + 8 < BATCH_TILE:
        _mm_storeu_si128((__m128i*) (output + ${N}), vout${ABC[N:N+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${N}), vout${ABC[N:N+8]}${ABC[N:N+8]});
    output += ${BATCH_TILE};
  }
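  // Remainder: handle the last 1 to BATCH_TILE-1 elements in groups of up to 8.
  // The 4-byte loads may read past the end of input_a; the XNN_OOB_READS
  // annotation records that such over-reads are intentional, and the extra
  // lanes are never stored.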
  if XNN_UNLIKELY(n != 0) {
    ${"do " if BATCH_TILE > 8 else ""}{
      const __m128i va${ABC[0:4]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
      const __m128i va${ABC[4:8]} = ${_MM_CVTEPX8_EPI32}(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
      $if BATCH_TILE > 8:
        input_a += 8;

      $if XOP:
        __m128i vacc${ABC[0:4]} = _mm_macc_epi32(va${ABC[0:4]}, va_multiplier, vbias);
        __m128i vacc${ABC[4:8]} = _mm_macc_epi32(va${ABC[4:8]}, va_multiplier, vbias);
      $else:
        __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_mullo_epi32(va${ABC[0:4]}, va_multiplier));
        __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_mullo_epi32(va${ABC[4:8]}, va_multiplier));

      vacc${ABC[0:4]} = _mm_sra_epi32(vacc${ABC[0:4]}, vshift);
      vacc${ABC[4:8]} = _mm_sra_epi32(vacc${ABC[4:8]}, vshift);

      const __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);

      __m128i vout${ABC[0:8]}${ABC[0:8]} = ${_MM_PACKXS_EPI16}(vout${ABC[0:8]}, vout${ABC[0:8]});
      vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MAX_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_min);
      vout${ABC[0:8]}${ABC[0:8]} = ${_MM_MIN_EPX8}(vout${ABC[0:8]}${ABC[0:8]}, voutput_max);

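      // Store the final elements with progressively narrower stores (8, 4, 2,
      // then 1 byte), shifting consumed lanes out of the register after each
      // partial store.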
      $if BATCH_TILE > 8:
        if XNN_LIKELY(n >= (8 * sizeof(${XINT8_T}))) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          n -= 8 * sizeof(${XINT8_T});
        } else {
          if (n & (4 * sizeof(${XINT8_T}))) {
            unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (n & (2 * sizeof(${XINT8_T}))) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (n & (1 * sizeof(${XINT8_T}))) {
            *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
          }
          n = 0;
        }
      $else:
        if (n & (4 * sizeof(${XINT8_T}))) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]}));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (n & (2 * sizeof(${XINT8_T}))) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0));
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (n & (1 * sizeof(${XINT8_T}))) {
          *output = (${XINT8_T}) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
        }
    }${" while (n != 0);" if BATCH_TILE > 8 else ""}
  }
}