// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert SSE in [2, 4]
$assert not XOP or AVX
$assert not AVX or SSE == 4
$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128"]
$assert MR <= 4
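// MRx4c2 IGEMM microkernel template: each generated kernel computes an MRx4
// output tile, consuming K in groups of 2 bytes (the "c2" packing) and
// requantizing with the FP32 scheme. SSE selects the baseline ISA (SSE2 or
// SSE4.1); the XOP and AVX variants build on the SSE4.1 code path.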
#include <assert.h>

$if XOP:
  #if defined(__GNUC__) || defined(__clang__)
    #include <x86intrin.h>
  #else
    #include <immintrin.h>
    #include <ammintrin.h>
  #endif
$else:
  $SSE_HEADER = {2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
  #include <${SSE_HEADER}>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


$PARAMS_STRUCT = REQUANTIZATION.lower() + "_" + ("sse4" if SSE == 4 and DATATYPE != "QU8" else "sse2")
$PARAMS_UNION = "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$ISA = "xop" if XOP else "avx" if AVX else {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x4c2__${ISA}_${VARIANT.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

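  // Round the channel count up to a multiple of 2 so full 2-byte groups are
  // always read, and set up one output row pointer per tile row, clamping
  // rows beyond `mr` onto the previous row so they alias a valid address.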
  kc = round_up_po2(kc, 2 * sizeof(${XINT8_T}));
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

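  // Outer loop over output columns: each iteration produces one ${MR}x4 tile.
  // The accumulators are seeded from the bias values at the head of `w`.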
  do {
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    $for M in range(1, MR):
      __m128i vacc${M}x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
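      // Walk the indirection buffer: each step consumes ${MR} input row
      // pointers. Entries equal to `zero` point at the zero buffer and are
      // left unadjusted, so padding rows read zeros instead of real data.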
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if DATATYPE == "QU8":
        const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.kernel_zero_point);
        $if SSE < 4 or VARIANT == "LD128":
          const __m128i vzero = _mm_setzero_si128();
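      // Main loop: consume 8 bytes of K per row per iteration. Each row's
      // bytes are widened to 16 bits, then each 2-byte group is broadcast
      // with a shuffle and multiply-accumulated against a 2x4 weight slice
      // via _mm_madd_epi16 (fused into one _mm_maddd_epi16 on XOP).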
      while (k >= 8 * sizeof(${XINT8_T})) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} += 8;

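        // Weight loads: the LD128 variant pulls 16 bytes (two 2x4 groups)
        // per load and widens both halves; LD64 loads one 8-byte group at a
        // time. QU8 weights additionally have the kernel zero point
        // subtracted after widening.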
        $if VARIANT == "LD128":
          $for K in range(0, 4, 2):
            $if K == 0:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) w);
            $else:
              const __m128i vb${K}${K+1} = _mm_loadu_si128((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}${K+1}, vzero), vb_zero_point);
              const __m128i vxb${K+1} = _mm_sub_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vzero), vb_zero_point);
            $elif SSE == 4:
              const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K}${K+1});
              const __m128i vxb${K+1} = _mm_srai_epi16(_mm_unpackhi_epi8(vb${K}${K+1}, vb${K}${K+1}), 8);
            $else:
              const __m128i vsb${K}${K+1} = _mm_cmpgt_epi8(_mm_setzero_si128(), vb${K}${K+1});
              const __m128i vxb${K} = _mm_unpacklo_epi8(vb${K}${K+1}, vsb${K}${K+1});
              const __m128i vxb${K+1} = _mm_unpackhi_epi8(vb${K}${K+1}, vsb${K}${K+1});

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K+1}, ${K+1}, ${K+1}, ${K+1})), vxb${K+1}));
        $else:
          $for K in range(4):
            $if K == 0:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) w);
            $else:
              const __m128i vb${K} = _mm_loadl_epi64((const __m128i*) ((const ${XINT8_T}*) w + ${K * 8}));
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_cvtepu8_epi16(vb${K}), vb_zero_point);
              $else:
                const __m128i vxb${K} = _mm_sub_epi16(_mm_unpacklo_epi8(vb${K}, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb${K} = _mm_cvtepi8_epi16(vb${K});
              $else:
                const __m128i vxb${K} = _mm_srai_epi16(_mm_unpacklo_epi8(vb${K}, vb${K}), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(${K}, ${K}, ${K}, ${K})), vxb${K}));

        w = (const void*) ((const ${XINT8_T}*) w + 32);
        k -= 8 * sizeof(${XINT8_T});
      }
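      // Remainder: kc was rounded up to a multiple of 2, so 2, 4, or 6 bytes
      // of K may remain; handle them one 2-byte group at a time.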
      if (k != 0) {
        $for M in range(MR):
          const __m128i va${M} = _mm_loadl_epi64((const __m128i*) a${M});
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepu8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_unpacklo_epi8(va${M}, vzero);
          $else:
            $if SSE == 4:
              const __m128i vxa${M} = _mm_cvtepi8_epi16(va${M});
            $else:
              const __m128i vxa${M} = _mm_srai_epi16(_mm_unpacklo_epi8(va${M}, va${M}), 8);
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const ${XINT8_T}*) w + 8);
        $if DATATYPE == "QU8":
          $if SSE == 4:
            const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
          $else:
            const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
        $else:
          $if SSE == 4:
            const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
          $else:
            const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

        $for M in range(MR):
          $if XOP:
            vacc${M}x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc${M}x0123);
          $else:
            vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(${XINT8_T})) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const ${XINT8_T}*) w + 8);
          $if DATATYPE == "QU8":
            $if SSE == 4:
              const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
            $else:
              const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
          $else:
            $if SSE == 4:
              const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
            $else:
              const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

          $for M in range(MR):
            $if XOP:
              vacc${M}x0123 = _mm_maddd_epi16(
                _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc${M}x0123);
            $else:
              vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(${XINT8_T})) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const ${XINT8_T}*) w + 8);
            $if DATATYPE == "QU8":
              $if SSE == 4:
                const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
              $else:
                const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
            $else:
              $if SSE == 4:
                const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
              $else:
                const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

            $for M in range(MR):
              $if XOP:
                vacc${M}x0123 = _mm_maddd_epi16(
                  _mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc${M}x0123);
              $else:
                vacc${M}x0123 = _mm_add_epi32(vacc${M}x0123,
                  _mm_madd_epi16(_mm_shuffle_epi32(vxa${M}, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          }
        }
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

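    // FP32 requantization: convert the int32 accumulators to float, scale
    // them (per-channel scales trail the weights for QC8, a single scale
    // lives in params otherwise), clamp against the output maximum, and
    // convert back to int32 with rounding.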
    $for M in range(MR):
      __m128 vscaled${M}x0123 = _mm_cvtepi32_ps(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale0123);
    $else:
      const __m128 vscale = _mm_load_ps(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vscaled${M}x0123 = _mm_mul_ps(vscaled${M}x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->${PARAMS_STRUCT}.output_max_less_zero_point);
    $for M in range(MR):
      vscaled${M}x0123 = _mm_min_ps(vscaled${M}x0123, voutput_max_less_zero_point);

    $for M in range(MR):
      vacc${M}x0123 = _mm_cvtps_epi32(vscaled${M}x0123);

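    // Narrow to 8 bits: pack pairs of rows from int32 to int16 with signed
    // saturation, add the output zero point, then pack again to bytes. The
    // output minimum is clamped at 16 bits on SSE2 (which lacks
    // _mm_max_epi8) and at 8 bits on the SSE4.1 and QU8 paths.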
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_zero_point);
    $for M in range(0, MR, 2):
      __m128i vacc${M}${min(M+1, MR-1)}x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123), voutput_zero_point);

    $if DATATYPE == "QU8":
      $if MR > 2:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packus_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));
    $else:
      $if SSE < 4:
        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min);
        $for M in range(0, MR, 2):
          vacc${M}${min(M+1, MR-1)}x0123 = _mm_max_epi16(vacc${M}${min(M+1, MR-1)}x0123, voutput_min);

      $if MR > 2:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
      $else:
        __m128i vout = _mm_packs_epi16(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

      $if SSE == 4:
        vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->${PARAMS_STRUCT}.output_min));

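    // Store the tile. The full-width path writes 4 bytes per row (lane M of
    // vout holds row M) and rewinds the indirection pointer by `ks` bytes
    // for the next column block; the tail path writes 2 and/or 1 byte(s)
    // per row for the remaining columns.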
    if (nc >= 4) {
      $for M in reversed(range(1, MR)):
        $if SSE == 4:
          unaligned_store_u32(c${M}, (uint32_t) _mm_extract_epi32(vout, ${M}));
        $else:
          unaligned_store_u32(c${M}, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(${M}, ${M}, ${M}, ${M}))));
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (${XINT8_T}*) ((uintptr_t) c0 + cn_stride);

      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      if (nc & 2) {
        $for M in reversed(range(MR)):
          unaligned_store_u16(c${M}, (uint16_t) _mm_extract_epi16(vout, ${M * 2}));
          c${M} += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        $if SSE == 4:
          $for M in reversed(range(MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi8(vout, ${M * 4});
        $else:
          $for M in reversed(range(1, MR)):
            *c${M} = (${XINT8_T}) _mm_extract_epi16(vout, ${M * 2});
          *c0 = (${XINT8_T}) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}