// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>

void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__sse41_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

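  // Each iteration of this outer loop computes one output pixel across all channels.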
  do {
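    // Load the 9 input row pointers for this output pixel. A pointer that
    // equals the zero buffer marks padding and must not be offset.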
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
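    // Packed weight layout per group of 8 channels: 8 int32 biases,
    // 72 int8 kernel taps (9 taps x 8 channels), then 8 fp32 scales.
    // Main loop: process 8 channels per iteration.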
    for (; c >= 8; c -= 8) {
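      // Initialize the accumulators with the per-channel biases.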
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));

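      // For each of the 9 kernel taps: sign-extend 4 int8 inputs and 4 int8
      // weights to int32 lanes, multiply, and accumulate.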
      const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)))));
      const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(int8_t)))));
      i0 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi0x4567, vk0x4567));

      const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)))));
      const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(int8_t)))));
      i1 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi1x4567, vk1x4567));

      const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)))));
      const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(int8_t)))));
      i2 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi2x4567, vk2x4567));

      const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)))));
      const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(int8_t)))));
      i3 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi3x4567, vk3x4567));

      const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)))));
      const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(int8_t)))));
      i4 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi4x4567, vk4x4567));

      const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)))));
      const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(int8_t)))));
      i5 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi5x4567, vk5x4567));

      const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)))));
      const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(int8_t)))));
      i6 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi6x4567, vk6x4567));

      const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)))));
      const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(int8_t)))));
      i7 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi7x4567, vk7x4567));

      const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)))));
      const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(int8_t)))));
      i8 += 8;

      vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));
      vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vi8x4567, vk8x4567));

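      // Advance w past the biases and the 72 kernel taps; the fp32 scales follow.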
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

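      // Requantize: convert the accumulators to float and apply the per-channel scales.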
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
      w = (const void*) ((const float*) w + 8);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);

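      // Clamp above at (output_max - zero_point) while still in float, then
      // convert back to int32 with rounding to nearest-even.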
      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

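      // Pack to int16 with saturation, add the output zero point, pack to
      // int8, and clamp below at output_min.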
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
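    // Remainder: handle the last 1-7 channels in groups of up to 4.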
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 8);
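      // k points at the int8 kernel taps; successive taps for the same 4
      // channels are 8 bytes apart because weights stay packed in groups of
      // 8 channels.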
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) k)));
        i0 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi0x0123, vk0x0123));
        const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 8))));
        i1 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi1x0123, vk1x0123));
        const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16))));
        i2 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi2x0123, vk2x0123));
        const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24))));
        i3 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi3x0123, vk3x0123));
        const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32))));
        i4 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi4x0123, vk4x0123));
        const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 40))));
        i5 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi5x0123, vk5x0123));
        const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48))));
        i6 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi6x0123, vk6x0123));
        const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 56))));
        i7 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi7x0123, vk7x0123));
        const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64))));
        i8 += 4;

        vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vi8x0123, vk8x0123));

        k += 4;

        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packs_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

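        // Store 4 bytes when at least 4 channels remain; otherwise store the
        // final 1-3 bytes with 16-bit and 8-bit extracts.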
        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}