// Auto-generated file. Do not edit!
//   Template: src/qs8-vaddc/sse-mul16-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>


void xnn_qs8_vaddc_minmax_ukernel__sse41_mul16_ld64_x32(
    size_t n,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
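  // input_b points to a single scalar addend (this is the add-with-constant kernel),
  // so its contribution (b_multiplier * b) is folded into the bias outside the loop.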
  const __m128i vbias = _mm_add_epi32(
    _mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse4_mul16.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
    _mm_load_si128((const __m128i*) params->sse4_mul16.bias));
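  // The 32-bit a_multiplier is kept as separate low/high 16-bit halves so that the
  // 16x16-bit SSE multiplies below can reconstruct the full 32-bit products.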
  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);

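  // Main loop: 32 int8 elements per iteration, processed as four groups of 8
  // sign-extended to 16 bits.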
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
    const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
    const __m128i vaGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 16)));
    const __m128i vaOPQRSTUV = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 24)));
    input_a += 32;

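    // Emulate a 16x32-bit multiply with 16-bit instructions: _mm_mullo_epi16/_mm_mulhi_epu16
    // produce the low and high halves of va * a_multiplier_lo, and the a_multiplier_hi term
    // is then folded into the high half.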
    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
    __m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
    const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
    __m128i vaprodGHIJKLMNhi = _mm_mulhi_epu16(vaGHIJKLMN, va_multiplier_lo);
    const __m128i vaprodGHIJKLMNlo = _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_lo);
    __m128i vaprodOPQRSTUVhi = _mm_mulhi_epu16(vaOPQRSTUV, va_multiplier_lo);
    const __m128i vaprodOPQRSTUVlo = _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_lo);

    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
    vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
    vaprodGHIJKLMNhi = _mm_add_epi16(vaprodGHIJKLMNhi, _mm_mullo_epi16(vaGHIJKLMN, va_multiplier_hi));
    vaprodOPQRSTUVhi = _mm_add_epi16(vaprodOPQRSTUVhi, _mm_mullo_epi16(vaOPQRSTUV, va_multiplier_hi));

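    // _mm_mulhi_epu16 treated va as unsigned; for negative lanes the unsigned product is too
    // large by a_multiplier_lo << 16, so subtract a_multiplier_lo from the high half there.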
    vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
    vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
    vaprodGHIJKLMNhi = _mm_sub_epi16(vaprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vaGHIJKLMN, 15), va_multiplier_lo));
    vaprodOPQRSTUVhi = _mm_sub_epi16(vaprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vaOPQRSTUV, 15), va_multiplier_lo));

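    // Interleave the low/high 16-bit halves into 32-bit products and add the precomputed bias.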
    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
    __m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
    __m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
    __m128i vaccGHIJ = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
    __m128i vaccKLMN = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodGHIJKLMNlo, vaprodGHIJKLMNhi));
    __m128i vaccOPQR = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));
    __m128i vaccSTUV = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprodOPQRSTUVlo, vaprodOPQRSTUVhi));

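    // Requantize: arithmetic right shift by the precomputed shift amount.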
    vacc0123 = _mm_sra_epi32(vacc0123, vshift);
    vacc4567 = _mm_sra_epi32(vacc4567, vshift);
    vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
    vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
    vaccGHIJ = _mm_sra_epi32(vaccGHIJ, vshift);
    vaccKLMN = _mm_sra_epi32(vaccKLMN, vshift);
    vaccOPQR = _mm_sra_epi32(vaccOPQR, vshift);
    vaccSTUV = _mm_sra_epi32(vaccSTUV, vshift);

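    // Pack to 16 bits with signed saturation and add the output zero point.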
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
    __m128i voutOPQRSTUV = _mm_adds_epi16(_mm_packs_epi32(vaccOPQR, vaccSTUV), voutput_zero_point);

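    // Pack to 8 bits with signed saturation.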
    __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
    __m128i voutGHIJKLMNOPQRSTUV = _mm_packs_epi16(voutGHIJKLMN, voutOPQRSTUV);

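    // Clamp to the requested output range and store 32 bytes.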
    vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = _mm_max_epi8(voutGHIJKLMNOPQRSTUV, voutput_min);

    vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = _mm_min_epi8(voutGHIJKLMNOPQRSTUV, voutput_max);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storeu_si128((__m128i*) (output + 16), voutGHIJKLMNOPQRSTUV);
    output += 32;
  }
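  // Remainder: handle the last 1-31 elements 8 at a time. The 8-byte loads may read past
  // the end of input_a, which is why the kernel is annotated with XNN_OOB_READS.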
  if XNN_UNLIKELY(n != 0) {
    do {
      const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
      input_a += 8;

      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);

      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));

      vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));

      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));

      vacc0123 = _mm_sra_epi32(vacc0123, vshift);
      vacc4567 = _mm_sra_epi32(vacc4567, vshift);

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
      vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);

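      // Store 8 bytes when at least 8 elements remain; otherwise write the final
      // 1-7 bytes in 4/2/1-byte pieces.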
      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        n -= 8 * sizeof(int8_t);
      } else {
        if (n & (4 * sizeof(int8_t))) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}