// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c16(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int32_t* buffer,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);

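  // First pass: sum the first 7 rows of every channel and store 32-bit partial sums,
  // seeded with init_bias from the params, into the scratch buffer.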
  const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 16)) {

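    // SSE2 has no byte sign-extension instruction, so each 8-byte load is widened to
    // int16 by duplicating the bytes with unpacklo and arithmetic-shifting right by 8;
    // the 7 rows then accumulate into 16-bit sums before widening to 32 bits.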
    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
    i0 += 16;

    const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
    const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
    i1 += 16;

    const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
    const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
    i2 += 16;

    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
    const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
    i3 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
    const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
    i4 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
    const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
    i5 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
    const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
    i6 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

    const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
    const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
    __m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);

    vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
    vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
    vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
    vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);

    _mm_store_si128((__m128i*) b, vacc0123);
    _mm_store_si128((__m128i*) (b + 4), vacc4567);
    _mm_store_si128((__m128i*) (b + 8), vacc89AB);
    _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
    b += 16;
  }

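  // Intermediate passes: for each further group of 7 rows, accumulate the 16-bit row
  // sums and add them to the 32-bit partial sums already stored in the buffer.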
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {

      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      i0 += 16;

      const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
      const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      i1 += 16;

      const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
      const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      i2 += 16;

      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
      const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
      const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      i3 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
      const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
      const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
      i4 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
      const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
      const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
      i5 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
      const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
      const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
      i6 += 16;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
      const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
      vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

      const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
      const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
      __m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
      __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
      vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
      vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));

      _mm_store_si128((__m128i*) b, vacc0123);
      _mm_store_si128((__m128i*) (b + 4), vacc4567);
      _mm_store_si128((__m128i*) (b + 8), vacc89AB);
      _mm_store_si128((__m128i*) (b + 12), vaccCDEF);
      b += 16;
    }
  }

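  // Last pass: at most 7 rows remain; pointers for rows that are absent are redirected
  // to the zero vector so they contribute nothing to the sums.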
  i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

  const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
  const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
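  // Process 16 channels per iteration: add the buffered sums, convert to float, scale,
  // clamp to the output range, and requantize back to int8.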
  for (; channels >= 16; channels -= 16) {

    const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
    const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
    i0 += 16;

    const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
    const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
    const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
    const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
    i1 += 16;

    const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
    const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
    const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
    const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
    i2 += 16;

    __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
    const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
    const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
    __m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
    const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
    i3 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
    const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
    const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
    const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
    const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
    i4 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
    const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
    const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
    const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
    const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
    i5 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
    const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
    const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
    const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
    const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
    i6 += 16;

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
    const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
    const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);

    vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);

    const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
    __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
    __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
    const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
    __m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
    __m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);

    vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
    vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
    vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
    vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
    buffer += 16;

    __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
    __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
    __m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
    __m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);

    vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
    vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
    vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
    vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);

    vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
    vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
    vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
    vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);

    vacc0123 = _mm_cvtps_epi32(vfpacc0123);
    vacc4567 = _mm_cvtps_epi32(vfpacc4567);
    vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
    vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);

    __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
    __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);

    vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
    vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);

    __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);


    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
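  // Handle the remaining 1-15 channels, 8 at a time, with partial stores for the tail.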
  if XNN_UNLIKELY(channels != 0) {
    do {

      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;

      const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;

      const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      __m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
      const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      i4 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
      const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      i5 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
      const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      i6 += 8;

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
      const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
      const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);

      vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);

      const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
      __m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
      __m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;

      __m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);

      vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
      vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);

      vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
      vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vfpacc0123);
      vacc4567 = _mm_cvtps_epi32(vfpacc4567);

      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
      vout01234567 = _mm_max_epi16(vout01234567, voutput_min);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      if XNN_LIKELY(channels >= 8) {
        _mm_storel_epi64((__m128i*) output, vout0123456701234567);
        output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
        if (channels & 2) {
          unaligned_store_u16(output, (uint16_t) vout0123);
          vout0123 >>= 16;
          output += 2;
        }
        if (channels & 1) {
          *output = (int8_t) vout0123;
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}