// Auto-generated file. Do not edit!
//   Template: src/qs8-vadd/sse-mul16-ld64.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>


void xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x16(
    size_t n,
    const int8_t* input_a,
    const int8_t* input_b,
    int8_t* output,
    const union xnn_qs8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
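  // Load the quantization parameters: bias, the 32-bit input multipliers split into 16-bit
  // low/high halves, the requantization right-shift, the output zero point, and the output
  // clamping bounds.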
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4_mul16.bias);
  const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_lo);
  const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.a_multiplier_hi);
  const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_lo);
  const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse4_mul16.b_multiplier_hi);
  const __m128i vshift = _mm_cvtsi32_si128((int) params->sse4_mul16.shift);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4_mul16.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4_mul16.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4_mul16.output_max);

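  // Main loop: process 16 elements per iteration.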
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
    const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
    const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
    const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
    input_a += 16;
    input_b += 16;

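    // Multiply the sign-extended 16-bit inputs by the 32-bit multipliers using 16-bit
    // multiplications: start with the unsigned high and low 16-bit halves of input * multiplier_lo.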
    __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
    __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
    const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
    const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
    __m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
    __m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
    const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
    const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);

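    // Add the low half of input * multiplier_hi into the high halves of the products.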
    vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
    vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
    vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
    vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));

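    // _mm_mulhi_epu16 treats negative inputs as unsigned, overestimating the high half by
    // multiplier_lo, so subtract multiplier_lo wherever the input sign bit is set.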
    vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
    vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));
    vaprod89ABCDEFhi = _mm_sub_epi16(vaprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(va89ABCDEF, 15), va_multiplier_lo));
    vbprod89ABCDEFhi = _mm_sub_epi16(vbprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vb89ABCDEF, 15), vb_multiplier_lo));

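    // Interleave the low and high product halves into 32-bit lanes and add the bias.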
    __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
    __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
    __m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
    __m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));

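    // Accumulate the input_b products.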
    vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
    vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));

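    // Requantize: arithmetic shift right by the pre-computed amount.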
    vacc0123 = _mm_sra_epi32(vacc0123, vshift);
    vacc4567 = _mm_sra_epi32(vacc4567, vshift);
    vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
    vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);

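    // Pack to 16 bits with signed saturation and add the output zero point.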
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

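    // Pack to 8 bits with signed saturation.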
    __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);

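    // Clamp to the output range [output_min, output_max].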
    vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
    vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
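  // Remainder: handle the final 1 to 15 elements, 8 at a time; the 8-byte loads may read past
  // the end of the inputs, which is permitted by the XNN_OOB_READS annotation.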
  if XNN_UNLIKELY(n != 0) {
    do {
      const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
      const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
      input_a += 8;
      input_b += 8;

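      // Same fixed-point multiply-add, requantization, and clamping sequence as the main loop,
      // applied to 8 elements.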
      __m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
      __m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
      const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
      const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);

      vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
      vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));

      vaprod01234567hi = _mm_sub_epi16(vaprod01234567hi, _mm_and_si128(_mm_srai_epi16(va01234567, 15), va_multiplier_lo));
      vbprod01234567hi = _mm_sub_epi16(vbprod01234567hi, _mm_and_si128(_mm_srai_epi16(vb01234567, 15), vb_multiplier_lo));

      __m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
      __m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));

      vacc0123 = _mm_sra_epi32(vacc0123, vshift);
      vacc4567 = _mm_sra_epi32(vacc4567, vshift);

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
      vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);

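      // Store 8 elements when at least 8 remain; otherwise write the final 1-7 bytes in
      // 4-, 2-, and 1-byte pieces.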
      if XNN_LIKELY(n >= (8 * sizeof(int8_t))) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        n -= 8 * sizeof(int8_t);
      } else {
        if (n & (4 * sizeof(int8_t))) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (n & (2 * sizeof(int8_t))) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (n & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
        }
        n = 0;
      }
    } while (n != 0);
  }
}