// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx16c8-avx512skx.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>

void xnn_qs8_igemm_minmax_fp32_ukernel_1x16c8__avx512skx(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

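  // The c8 microkernel consumes K in groups of 8 int8 values, so kc is
  // rounded up to a multiple of 8; the packed weights are expected to carry
  // matching zero padding for the tail.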
  kc = round_up_po2(kc, 8);
  int8_t* c0 = c;

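  // Mask 0x1111 sets bits 0, 4, 8, and 12, so each maskz_expandloadu below
  // scatters 4 consecutive 32-bit bias values into the first element of each
  // 128-bit group of a zmm accumulator, zeroing the remaining elements.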
  const __mmask16 vbias_mask = _cvtu32_mask16(0x1111);
  const __m512 vscale = _mm512_load_ps(params->fp32_avx512.scale);
  const __m512 voutput_max_less_zero_point = _mm512_load_ps(params->fp32_avx512.output_max_less_zero_point);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx512.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_avx512.output_min);
  do {
    __m512i vacc0x0123 = _mm512_maskz_expandloadu_epi32(vbias_mask, w);
    __m512i vacc0x4567 = _mm512_maskz_expandloadu_epi32(vbias_mask, (const void*) ((const int32_t*) w + 4));
    __m512i vacc0x89AB = _mm512_maskz_expandloadu_epi32(vbias_mask, (const void*) ((const int32_t*) w + 8));
    __m512i vacc0xCDEF = _mm512_maskz_expandloadu_epi32(vbias_mask, (const void*) ((const int32_t*) w + 12));
    w = (const void*) ((const int32_t*) w + 16);

    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      a += 1;

      size_t k = 0;
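      // Each iteration consumes 8 bytes of A and 4x32 bytes of packed B:
      // va0 sign-extends 8 input bytes to 16 bits and broadcasts them to all
      // four 128-bit groups; each vbNNNN holds the matching 8 weights for 4
      // output channels, one channel per 128-bit group. _mm512_madd_epi16
      // then produces 4 per-channel int32 partial dot products per group.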
      while (k < kc) {
        const __m512i va0 = _mm512_broadcast_i32x4(_mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) a0)));
        a0 += 8;

        const __m512i vb0123 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) w));

        vacc0x0123 = _mm512_add_epi32(vacc0x0123, _mm512_madd_epi16(va0, vb0123));
        const __m512i vb4567 = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 32)));

        vacc0x4567 = _mm512_add_epi32(vacc0x4567, _mm512_madd_epi16(va0, vb4567));
        const __m512i vb89AB = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 64)));

        vacc0x89AB = _mm512_add_epi32(vacc0x89AB, _mm512_madd_epi16(va0, vb89AB));
        const __m512i vbCDEF = _mm512_cvtepi8_epi16(_mm256_load_si256((const __m256i*) ((const int8_t*) w + 96)));

        vacc0xCDEF = _mm512_add_epi32(vacc0xCDEF, _mm512_madd_epi16(va0, vbCDEF));

        w = (const void*) ((const int8_t*) w + 128);
        k += 8 * sizeof(int8_t);
      }
      p -= 1 * sizeof(void*);
    } while (p != 0);

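    // Horizontal reduction: the unpack/add tree sums the 4 partial dot
    // products per channel into a single int32 per channel. The hex digits
    // in the variable names record which output channel each 32-bit lane
    // holds after every step.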
    const __m512i vacc0x04152637 = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x0123, vacc0x4567), _mm512_unpackhi_epi32(vacc0x0123, vacc0x4567));
    const __m512i vacc0x8C9DAEBF = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x89AB, vacc0xCDEF), _mm512_unpackhi_epi32(vacc0x89AB, vacc0xCDEF));

    __m512i vacc0x084C195D2A6E3B7F = _mm512_add_epi32(_mm512_unpacklo_epi32(vacc0x04152637, vacc0x8C9DAEBF), _mm512_unpackhi_epi32(vacc0x04152637, vacc0x8C9DAEBF));

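    // fp32 requantization: scale the int32 accumulators in float, clamp
    // against output_max (expressed relative to the zero point), and convert
    // back to int32 with rounding before the zero point is added.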
    __m512 vscaled0x084C195D2A6E3B7F = _mm512_cvtepi32_ps(vacc0x084C195D2A6E3B7F);

    vscaled0x084C195D2A6E3B7F = _mm512_mul_ps(vscaled0x084C195D2A6E3B7F, vscale);

    vscaled0x084C195D2A6E3B7F = _mm512_min_ps(vscaled0x084C195D2A6E3B7F, voutput_max_less_zero_point);

    vacc0x084C195D2A6E3B7F = _mm512_cvtps_epi32(vscaled0x084C195D2A6E3B7F);

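    // _mm256_packs_epi32 interleaves the two 256-bit halves per 128-bit
    // lane, so the 16-bit (and later 8-bit) results come out in the
    // scrambled 084C2A6E195D3B7F order; the byte shuffle below restores
    // channels 0..15.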
    const __m256i vacc0x084C2A6E195D3B7F = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0x084C195D2A6E3B7F), _mm512_extracti32x8_epi32(vacc0x084C195D2A6E3B7F, 1)), voutput_zero_point);

    const __m128i vout0x084C2A6E195D3B7F = _mm_packs_epi16(_mm256_castsi256_si128(vacc0x084C2A6E195D3B7F), _mm256_extracti128_si256(vacc0x084C2A6E195D3B7F, 1));
    __m128i vout0x0123456789ABCDEF = _mm_shuffle_epi8(vout0x084C2A6E195D3B7F, _mm_set_epi8(15, 7, 11, 3, 13, 5, 9, 1, 14, 6, 10, 2, 12, 4, 8, 0));
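    // The lower output clamp runs on the already-packed int8 values; the
    // upper clamp was applied in fp32 above, before rounding.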
    vout0x0123456789ABCDEF = _mm_max_epi8(vout0x0123456789ABCDEF, voutput_min);

    if (nc >= 16) {
      _mm_storeu_si128((__m128i*) c0, vout0x0123456789ABCDEF);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

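      // The indirection pointer a was advanced by one entry per iteration of
      // the ks loop, i.e. by ks bytes in total; rewind it so the same input
      // rows are reused for the next block of 16 output channels.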
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Prepare mask for valid 8-bit elements (depends on nc).
      const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT32_C(1) << nc) - UINT32_C(1)));

      _mm_mask_storeu_epi8(c0, vmask, vout0x0123456789ABCDEF);

      nc = 0;
    }
  } while (nc != 0);
}