// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-sse-mul32.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#if defined(__GNUC__) || defined(__clang__)
  #include <x86intrin.h>
#else
  #include <immintrin.h>
  #include <ammintrin.h>
#endif

#include <xnnpack/dwconv.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>


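// Depthwise convolution microkernel for QC8 (int8 data with per-channel scales):
// unipass, 9 kernel taps, up to 8 channels per main-loop iteration, int32
// accumulation via the XOP _mm_macc_epi32 multiply-accumulate, and fp32
// requantization with saturating min/max output clamping.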
void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__xop_mul32(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
    // Load the 9 input row pointers for this output pixel; rows that point at the
    // zero buffer are used as-is, all other rows are adjusted by input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    // Main loop: process the channels of this output pixel in groups of 8.
    for (; c >= 8; c -= 8) {
      // Initialize the 8 accumulators with the per-channel int32 biases.
      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((const int32_t*) w + 4));


      // For each of the 9 taps, sign-extend 8 input bytes and the matching 8 kernel
      // bytes to int32 and multiply-accumulate into the two accumulator registers.
      const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
      const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)))));
      const __m128i vi0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0 + 4)));
      const __m128i vk0x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 4 * sizeof(int8_t)))));
      i0 += 8;

      vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi0x4567, vk0x4567, vacc4567);

      const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
      const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)))));
      const __m128i vi1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1 + 4)));
      const __m128i vk1x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 12 * sizeof(int8_t)))));
      i1 += 8;

      vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi1x4567, vk1x4567, vacc4567);

      const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
      const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)))));
      const __m128i vi2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2 + 4)));
      const __m128i vk2x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 20 * sizeof(int8_t)))));
      i2 += 8;

      vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi2x4567, vk2x4567, vacc4567);

      const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
      const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)))));
      const __m128i vi3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3 + 4)));
      const __m128i vk3x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 28 * sizeof(int8_t)))));
      i3 += 8;

      vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi3x4567, vk3x4567, vacc4567);

      const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
      const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)))));
      const __m128i vi4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4 + 4)));
      const __m128i vk4x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 36 * sizeof(int8_t)))));
      i4 += 8;

      vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi4x4567, vk4x4567, vacc4567);

      const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
      const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)))));
      const __m128i vi5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5 + 4)));
      const __m128i vk5x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 44 * sizeof(int8_t)))));
      i5 += 8;

      vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi5x4567, vk5x4567, vacc4567);

      const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
      const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)))));
      const __m128i vi6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6 + 4)));
      const __m128i vk6x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 52 * sizeof(int8_t)))));
      i6 += 8;

      vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi6x4567, vk6x4567, vacc4567);

      const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
      const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)))));
      const __m128i vi7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7 + 4)));
      const __m128i vk7x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 60 * sizeof(int8_t)))));
      i7 += 8;

      vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi7x4567, vk7x4567, vacc4567);

      const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
      const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)))));
      const __m128i vi8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8 + 4)));
      const __m128i vk8x4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) ((uintptr_t) w + 8 * sizeof(int32_t) + 68 * sizeof(int8_t)))));
      i8 += 8;

      vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);
      vacc4567 = _mm_macc_epi32(vi8x4567, vk8x4567, vacc4567);

      // Advance w past the 8 int32 biases and the 72 (8 channels x 9 taps) int8 weights.
      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));

      // Requantize: convert the accumulators to fp32, apply the per-channel scales,
      // and clamp against the output maximum (expressed relative to the zero point).
      __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
      __m128 vscaled4567 = _mm_cvtepi32_ps(vacc4567);

      const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
      const __m128 vscale4567 = _mm_loadu_ps((const float*) w + 4);
      w = (const void*) ((const float*) w + 8);
      vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
      vscaled4567 = _mm_mul_ps(vscaled4567, vscale4567);

      const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
      vscaled0123 = _mm_min_ps(vscaled0123, voutput_max_less_zero_point);
      vscaled4567 = _mm_min_ps(vscaled4567, voutput_max_less_zero_point);

      vacc0123 = _mm_cvtps_epi32(vscaled0123);
      vacc4567 = _mm_cvtps_epi32(vscaled4567);

      // Pack to int16 with saturation, add the output zero point, pack to int8 with
      // saturation, and apply the output minimum.
      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
      vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);

      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
      output += 8;
    }
    if XNN_UNLIKELY(c != 0) {
      // Remainder: process the last 1-7 channels, up to 4 at a time. The packed int8
      // weights for this group start right after its 8 int32 biases.
      const int8_t* k = (const int8_t*) ((const int32_t*) w + 8);
      do {
        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);

        const __m128i vi0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i0)));
        const __m128i vk0x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) k)));
        i0 += 4;

        vacc0123 = _mm_macc_epi32(vi0x0123, vk0x0123, vacc0123);
        const __m128i vi1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i1)));
        const __m128i vk1x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 8))));
        i1 += 4;

        vacc0123 = _mm_macc_epi32(vi1x0123, vk1x0123, vacc0123);
        const __m128i vi2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i2)));
        const __m128i vk2x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 16))));
        i2 += 4;

        vacc0123 = _mm_macc_epi32(vi2x0123, vk2x0123, vacc0123);
        const __m128i vi3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i3)));
        const __m128i vk3x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 24))));
        i3 += 4;

        vacc0123 = _mm_macc_epi32(vi3x0123, vk3x0123, vacc0123);
        const __m128i vi4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i4)));
        const __m128i vk4x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 32))));
        i4 += 4;

        vacc0123 = _mm_macc_epi32(vi4x0123, vk4x0123, vacc0123);
        const __m128i vi5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i5)));
        const __m128i vk5x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 40))));
        i5 += 4;

        vacc0123 = _mm_macc_epi32(vi5x0123, vk5x0123, vacc0123);
        const __m128i vi6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i6)));
        const __m128i vk6x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 48))));
        i6 += 4;

        vacc0123 = _mm_macc_epi32(vi6x0123, vk6x0123, vacc0123);
        const __m128i vi7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i7)));
        const __m128i vk7x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 56))));
        i7 += 4;

        vacc0123 = _mm_macc_epi32(vi7x0123, vk7x0123, vacc0123);
        const __m128i vi8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(i8)));
        const __m128i vk8x0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(*((const int*) (k + 64))));
        i8 += 4;

        vacc0123 = _mm_macc_epi32(vi8x0123, vk8x0123, vacc0123);

        k += 4;

        // Requantize and pack this group of up to 4 channels, mirroring the main loop.
        __m128 vscaled0123 = _mm_cvtepi32_ps(vacc0123);
        const __m128 vscale0123 = _mm_loadu_ps((const float*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));
        vscaled0123 = _mm_mul_ps(vscaled0123, vscale0123);
        vscaled0123 = _mm_min_ps(vscaled0123, _mm_load_ps(params->fp32_sse4.output_max_less_zero_point));
        vacc0123 = _mm_cvtps_epi32(vscaled0123);

        w = (const void*) ((const int32_t*) w + 4);

        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
        __m128i vout0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc0123), voutput_zero_point);

        vout0123 = _mm_packs_epi16(vout0123, vout0123);
        vout0123 = _mm_max_epi8(vout0123, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

        if XNN_LIKELY(c >= 4) {
          _mm_storeu_si32(output, vout0123);
          output += 4;
          c -= 4;
        } else {
          // Fewer than 4 channels remain: store the last 1-3 bytes in 2- and 1-byte pieces.
          if (c & 2) {
            unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123, 0));
            vout0123 = _mm_srli_epi32(vout0123, 16);
            output += 2;
          }
          if (c & 1) {
            *output = (int8_t) _mm_extract_epi8(vout0123, 0);
            output += 1;
          }
          c = 0;
        }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}