// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-sse4.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c16(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

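  // Set up pointers to the first 7 input rows; each pass of this multipass
  // kernel consumes 7 rows at a time.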
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
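  // Between passes, step each row pointer forward by 7 rows, minus the bytes
  // (channels rounded up to the 16-byte tile) already consumed within a pass.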
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(uint8_t);

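  // First pass: sum the first 7 rows plus the initialization bias into 32-bit
  // accumulators, storing them to the scratch buffer 16 channels at a time.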
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 16)) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    i0 += 16;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    i1 += 16;

    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    i2 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    i3 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    i4 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    i5 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    i6 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

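    // Widen the 16-bit row sums to 32 bits: _mm_cvtepu16_epi32 zero-extends
    // the low 4 lanes, and unpacking with zero zero-extends the high 4 lanes.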
    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);

    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);

    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    _mm_store_si128((__m128i*) (b + 8), vacc89AB);
    _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
    b += 16;
  }

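  // Middle passes: while more than 7 rows remain, accumulate 7 more rows into
  // the 32-bit partial sums already stored in the scratch buffer.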
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
      i0 += 16;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
      i1 += 16;

      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
      const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
      i2 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
      const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
      i3 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
      const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
      i4 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
      const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
      i5 += 16;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
      const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
      i6 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

      const __m128i vzero = _mm_setzero_si128();
      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
      __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
      __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));

      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      _mm_store_si128((__m128i*) (b + 8), vacc89AB);
      _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
      b += 16;
    }
  }

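  // Last pass: between 1 and 7 rows remain. Redirect the pointers of any rows
  // past the end of the input to the zero vector, so they add nothing.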
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

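  // Load the fp32 requantization parameters: scale, upper clamping bound,
  // output zero point, and lower clamping bound.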
  const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
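  // Main last-pass loop: sum the remaining rows with the buffered partial
  // sums and requantize, producing 16 uint8_t outputs per iteration.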
  for (; channels >= 16; channels -= 16) {
    const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
    const __m128i vxi0x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
    i0 += 16;
    const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
    const __m128i vxi1x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
    i1 += 16;

    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
    i2 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
    i3 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
    i4 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
    i5 += 16;
    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
    i6 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

    const __m128i vzero = _mm_setzero_si128();
    __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vzero);
    __m128i vacc89AB = _mm_cvtepu16_epi32(vacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vzero);

    vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
    vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
    buffer += 16;

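    // Requantize: convert the 32-bit sums to float, scale, clamp from above,
    // and convert back to int32 (rounds to nearest-even under the default
    // MXCSR rounding mode).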
    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);

    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);

    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);

    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);

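    // Pack to 16 bits with signed saturation, add the output zero point with
    // saturation, pack to unsigned 8 bits, and clamp from below.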
    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

    __m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);

    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);

    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
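  // Remainder: handle the last 1-15 channels in groups of 8, with the final
  // partial group stored via progressively narrower writes.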
  if XNN_UNLIKELY(channels != 0) {
    do {
      const __m128i vxi0x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i0));
      i0 += 8;
      const __m128i vxi1x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i1));
      i1 += 8;

      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i2));
      i2 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i3));
      i3 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i4));
      i4 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i5));
      i5 += 8;
      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) i6));
      i6 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);

      __m128i vacc0123 = _mm_cvtepu16_epi32(vacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, _mm_setzero_si128());

      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;

      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);

      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);

      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);

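      // Store a full group of 8 outputs, or write the final 1-7 outputs with
      // 4-/2-/1-byte stores, shifting consumed lanes out of the vector.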
      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (channels & 1) {
          *output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
