// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/unipass-sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c24(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

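  // Set up the 7 row pointers. When fewer than 7 rows are present, the unused
  // pointers are redirected to the zero buffer, so the accumulation below can
  // read a full 7 rows unconditionally.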
  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

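  // Load the requantization constants precomputed in the fp32_sse2 params
  // variant: a bias added to each 32-bit sum plus the scale and the output
  // clamping/zero-point values used below.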
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
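  // Main loop: sum 7 rows and requantize, 24 channels per iteration.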
  for (; channels >= 24; channels -= 24) {

    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
    const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
    i0 += 24;

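    // SSE2 lacks PMOVSXBW, so each int8 vector is sign-extended to int16 by
    // interleaving the bytes with themselves (each byte lands in the high
    // half of a 16-bit lane) and arithmetic-shifting right by 8.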
    const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
    const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
    const __m128i vxi0xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi0xGHIJKLMN, vi0xGHIJKLMN), 8);
    const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
    i1 += 24;

    const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
    const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
    const __m128i vxi1xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi1xGHIJKLMN, vi1xGHIJKLMN), 8);
    const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
    i2 += 24;

    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
    const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
    __m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const __m128i vxi2xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi2xGHIJKLMN, vi2xGHIJKLMN), 8);
    const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
    i3 += 24;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
    const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const __m128i vxi3xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi3xGHIJKLMN, vi3xGHIJKLMN), 8);
    const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
    i4 += 24;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
    const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const __m128i vxi4xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi4xGHIJKLMN, vi4xGHIJKLMN), 8);
    const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
    i5 += 24;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
    const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const __m128i vxi5xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi5xGHIJKLMN, vi5xGHIJKLMN), 8);
    const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
    i6 += 24;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const __m128i vxi6xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi6xGHIJKLMN, vi6xGHIJKLMN), 8);

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);

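    // Widen the 16-bit sums to 32 bits. Seven rows of int8 cannot overflow
    // int16, so interleaving with a computed sign mask performs the sign
    // extension.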
    const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
    const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
    __m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
    const __m128i vsgnaccGHIJKLMN = _mm_cmpgt_epi16(_mm_setzero_si128(), vaccGHIJKLMN);
    __m128i vaccGHIJ = _mm_unpacklo_epi16(vaccGHIJKLMN, vsgnaccGHIJKLMN);
    __m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vsgnaccGHIJKLMN);

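    // Add the precomputed initialization bias to each widened sum
    // (presumably folding in the input zero-point contribution of the rows).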
    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
    vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
    vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);

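    // fp32 requantization: convert the biased sums to float and apply the
    // combined scale.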
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
    __m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
    __m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);

    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
    vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
    vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);

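    // Clamp from above against (output_max - output_zero_point) while still
    // in the float domain; the lower bound is applied later on int16 values.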
    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
    vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
    vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);

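    // Convert back to int32; CVTPS2DQ rounds to nearest-even under the
    // default MXCSR rounding mode.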
    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
    vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
    vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);

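    // Pack to int16 with signed saturation and add the output zero point,
    // also with saturation.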
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
    __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);

    vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
    vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
    voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);

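    // Pack to int8 with signed saturation and store all 24 output bytes.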
    __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
    __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
    output += 24;
  }
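  // Remainder: process up to 23 leftover channels in groups of up to 8. The
  // 8-byte loads may read past the end of each row, which the XNN_OOB_READS
  // annotation on this function permits.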
  if XNN_UNLIKELY(channels != 0) {
    do {

      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;

      const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;

      const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);

      const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);

      vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
      vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);

      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);

      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);

      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      vout01234567 = _mm_max_epi16(vout01234567, voutput_min);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

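      // Either store a full group of 8 channels, or handle the final sub-8
      // tail with 4-, 2-, and 1-byte stores, shifting the packed vector down
      // after each partial store.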
      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) vout0123);
          vout0123 >>= 16;
          output += 2;
        }
        if (channels & 1) {
          *output = (int8_t) vout0123;
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}