// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert MR <= 4
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


$PARAMS_STRUCT = REQUANTIZATION.lower() + "_avx2"
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x8c8__avx2(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

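  // Round kc up to a multiple of 8 bytes: the inner loop below consumes
  // 8 bytes of K per row per iteration (the c8 packing granularity).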
  kc = round_up_po2(kc, 8);
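  // Set up MR output row pointers; rows beyond mr alias the previous row so
  // that their stores stay in bounds.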
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
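    // The packed weights begin with 8 int32 biases.  Columns N and N+1 share
    // one 256-bit accumulator: lane 0 gathers 4 partial sums for column N,
    // lane 1 the same for column N+1.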
    $for N in range(0, 8, 2):
      const __m128i vbias0x${N} = _mm_cvtsi32_si128(((const int*) w)[${N}]);
      const __m128i vbias0x${N+1} = _mm_cvtsi32_si128(((const int*) w)[${N+1}]);
      __m256i vacc0x${N}${N+1} = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x${N}), vbias0x${N+1}, 1);
    $for M in range(1, MR):
      $for N in range(0, 8, 2):
        __m256i vacc${M}x${N}${N+1} = vacc0x${N}${N+1};
    w = (const int32_t*) w + 8;

    size_t p = ks;
    $if DATATYPE == "QU8":
      const __m256i vb_zero_point = _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.kernel_zero_point);
    do {
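      // Fetch the next MR row pointers from the indirection buffer; pointers
      // other than the zero-padding row are adjusted by a_offset.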
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = 0;
      while (k < kc) {
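        // Load 8 bytes of each A row, duplicate them into both 128-bit lanes,
        // and widen to 16 bits so both columns of a B pair see the same A data.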
        $for M in range(MR):
          const __m128i va${M} = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a${M}));
          $if DATATYPE == "QU8":
            const __m256i vxa${M} = _mm256_cvtepu8_epi16(va${M});
          $else:
            const __m256i vxa${M} = _mm256_cvtepi8_epi16(va${M});
          a${M} += 8;

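        // For each pair of columns: load 16 bytes of packed B (8 K values for
        // each of 2 columns) and widen to 16 bits (QU8 also subtracts the
        // kernel zero point); _mm256_madd_epi16 then multiplies the 16-bit
        // A and B pairs and sums adjacent products into the 32-bit accumulators.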
        $for N in range(0, 8, 2):
          $if N == 0:
            const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) w);
          $else:
            const __m128i vb${N}${N+1} = _mm_load_si128((const __m128i*) ((const ${XINT8_T}*) w + ${N * 8}));
          $if DATATYPE == "QU8":
            const __m256i vxb${N}${N+1} = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb${N}${N+1}), vb_zero_point);
          $else:
            const __m256i vxb${N}${N+1} = _mm256_cvtepi8_epi16(vb${N}${N+1});

          $for M in range(MR):
            vacc${M}x${N}${N+1} = _mm256_add_epi32(vacc${M}x${N}${N+1}, _mm256_madd_epi16(vxa${M}, vxb${N}${N+1}));

        w = (const void*) ((const ${XINT8_T}*) w + 64);
        k += 8 * sizeof(${XINT8_T});
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

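    // Reduce the 4 partial sums per column: two rounds of _mm256_hadd_epi32
    // leave the 8 column sums in 0-2-4-6-1-3-5-7 order within each vector.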
    $for M in range(MR):
      const __m256i vacc${M}x0213 = _mm256_hadd_epi32(vacc${M}x01, vacc${M}x23);
      const __m256i vacc${M}x4657 = _mm256_hadd_epi32(vacc${M}x45, vacc${M}x67);

    $for M in range(MR):
      const __m256i vacc${M}x02461357 = _mm256_hadd_epi32(vacc${M}x0213, vacc${M}x4657);

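    // Restore the natural 0..7 column order with a cross-lane permute.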
    const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
    $for M in range(MR):
      __m256i vacc${M}x01234567 = _mm256_permutevar8x32_epi32(vacc${M}x02461357, vpermute_mask);

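    // FP32 requantization: convert the int32 sums to float and scale them,
    // per channel for QC8 (scales are packed right after the weights) and
    // per tensor otherwise.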
    $for M in range(MR):
      __m256 vscaled${M}x01234567 = _mm256_cvtepi32_ps(vacc${M}x01234567);

    $if DATATYPE == "QC8":
      const __m256 vscale01234567 = _mm256_load_ps(w);
      w = (const void*) ((const float*) w + 8);
      $for M in range(MR):
        vscaled${M}x01234567 = _mm256_mul_ps(vscaled${M}x01234567, vscale01234567);
    $else:
      const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
      $for M in range(MR):
        vscaled${M}x01234567 = _mm256_mul_ps(vscaled${M}x01234567, vscale);

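    // Clamp above while still in float, then round-convert back to int32.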
    const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x01234567 = _mm256_min_ps(vscaled${M}x01234567, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x01234567 = _mm256_cvtps_epi32(vscaled${M}x01234567);

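    // Pack pairs of rows to int16, add the output zero point with saturation,
    // and undo the lane interleave introduced by the 256-bit pack.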
    const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m256i vacc${M}${min(M+1, MR-1)}x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc${M}x01234567, vacc${min(M+1, MR-1)}x01234567), voutput_zero_point);

    $for M in range(0, MR, 2):
      vacc${M}${min(M+1, MR-1)}x01234567 = _mm256_permute4x64_epi64(vacc${M}${min(M+1, MR-1)}x01234567, _MM_SHUFFLE(3, 1, 2, 0));

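    // Pack down to 8 bits with saturation (unsigned for QU8) and clamp below
    // against output_min.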
    $if DATATYPE == "QU8":
      $if MR > 2:
        __m256i vout = _mm256_packus_epi16(vacc0${min(1, MR-1)}x01234567, vacc${min(2, MR-1)}${min(3, MR-1)}x01234567);
      $else:
        __m256i vout = _mm256_packus_epi16(vacc0${min(1, MR-1)}x01234567, vacc0${min(1, MR-1)}x01234567);

      vout = _mm256_max_epu8(vout, _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if MR > 2:
        __m256i vout = _mm256_packs_epi16(vacc0${min(1, MR-1)}x01234567, vacc${min(2, MR-1)}${min(3, MR-1)}x01234567);
      $else:
        __m256i vout = _mm256_packs_epi16(vacc0${min(1, MR-1)}x01234567, vacc0${min(1, MR-1)}x01234567);

      vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->${PARAMS_STRUCT}.output_min));

    __m128i vout_lo = _mm256_castsi256_si128(vout);
    __m128i vout_hi = _mm256_extracti128_si256(vout, 1);

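    // After packing, vout_lo holds rows 0 and 2 and vout_hi holds rows 1 and
    // 3; this determines the register-to-row mapping in the stores below.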
    if (nc >= 8) {
      $if MR > 3:
        _mm_storeh_pi((__m64*) c3, _mm_castsi128_ps(vout_hi));
      $if MR > 2:
        _mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo));
      $if MR > 1:
        _mm_storel_epi64((__m128i*) c1, vout_hi);
      _mm_storel_epi64((__m128i*) c0, vout_lo);

      $for M in reversed(range(MR)):
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);

      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
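      // Remainder: store 4, 2, then 1 byte(s) per row, shifting the packed
      // vectors right after each partial store to expose the next elements.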
      if (nc & 4) {
        $if MR > 3:
          unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout_hi, 2));
        $if MR > 2:
          unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2));
        $if MR > 1:
          _mm_storeu_si32(c1, vout_hi);
        _mm_storeu_si32(c0, vout_lo);

        $for M in reversed(range(MR)):
          c${M} += 4;

        vout_lo = _mm_srli_epi64(vout_lo, 32);
        vout_hi = _mm_srli_epi64(vout_hi, 32);
      }
      if (nc & 2) {
        $if MR > 3:
          unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout_hi, 4));
        $if MR > 2:
          unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4));
        $if MR > 1:
          unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0));
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));

        $for M in reversed(range(MR)):
          c${M} += 2;

        vout_lo = _mm_srli_epi32(vout_lo, 16);
        vout_hi = _mm_srli_epi32(vout_hi, 16);
      }
      if (nc & 1) {
        $if MR > 3:
          *c3 = (${XINT8_T}) _mm_extract_epi8(vout_hi, 8);
        $if MR > 2:
          *c2 = (${XINT8_T}) _mm_extract_epi8(vout_lo, 8);
        $if MR > 1:
          *c1 = (${XINT8_T}) _mm_extract_epi8(vout_hi, 0);
        *c0 = (${XINT8_T}) _mm_extract_epi8(vout_lo, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}