// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/unaligned.h>

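// QC8 depthwise convolution microkernel: 9 kernel taps, up to 8 channels per
// main-loop iteration, int8 inputs/outputs with per-channel scales and fp32
// requantization, implemented with SSE4.1 16-bit multiplies ("mul16"/"add16").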
void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__sse41_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

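  // Each iteration of this loop produces one output pixel: set up the 9 input row
  // pointers for that pixel (pointers equal to the `zero` buffer are not offset,
  // so padding reads come from the shared zero buffer) and accumulate over channels.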
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
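    // Packed weights per 8-channel group: 8 int32 bias values, then 9 sets of
    // 8 int8 kernel taps, then 8 float per-channel scales. Products are formed in
    // 16 bits and pairs of taps are summed in int16 ("add16") before being widened
    // into the two int32 accumulators.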
    for (; c >= 8; c -= 8) {
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
      i0 += 8;

      __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
      i1 += 8;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));

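      // Widen the 16-bit pair-sum into the 32-bit accumulators: low four lanes via
      // sign extension, high four lanes via self-unpack and arithmetic shift right by 16.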
      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
      i2 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
      i3 += 8;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
      i4 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
      i5 += 8;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
      i6 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
      i7 += 8;

      vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
      i8 += 8;

      vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

      vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

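      // Requantize: convert the accumulators to float, apply the per-channel scales
      // that follow the kernel taps, clamp against the output max, round back to
      // int32, add the output zero point, and saturate-pack down to int8.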
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
      w = (const void*) ((const float*) w + 8);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      {
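        // Remainder path for the 1-7 leftover channels: loads still read a full
        // 8 lanes (the kernel is declared XNN_OOB_READS), but only `c` output bytes
        // are written by the partial stores below.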
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);

        __m128i vprod01234567 = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);

        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi1x01234567, vxk1x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);

        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi3x01234567, vxk3x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);

        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi5x01234567, vxk5x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);

        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);

        vprod01234567 = _mm_add_epi16(vprod01234567, _mm_mullo_epi16(vxi7x01234567, vxk7x01234567));

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);

        vprod01234567 = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);

        vacc0123 = _mm_add_epi32(vacc0123, _mm_cvtepi16_epi32(vprod01234567));
        vacc4567 = _mm_add_epi32(vacc4567, _mm_srai_epi32(_mm_unpackhi_epi16(vprod01234567, vprod01234567), 16));

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

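        // `w` is not advanced in this path, so the per-channel scales are read at
        // their fixed offset past the 8 bias values and 72 kernel taps.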
        const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));
        const __m128 vscale4567 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t) + 4 * sizeof(float)));
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
        vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);

        const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
        vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
        vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

        vacc0123 = _mm_cvtps_epi32(vscaled0123);
        vacc4567 = _mm_cvtps_epi32(vscaled4567);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);

        vout0123456701234567 = _mm_max_epi8(vout0123456701234567, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

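        // Store the remaining channels in pieces of 4, 2, and 1 byte, shifting the
        // packed vector right after each piece is written.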
        if (c & 4) {
          unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
          output += 4;
        }
        if (c & 2) {
          unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
          output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}