// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-neon-mul8.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/dwconv.h>

void xnn_qc8_dwconv_minmax_fp32_ukernel_up8x9__neon_mul8_ld64(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
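  // Outer loop: one iteration per output pixel. input[] holds 9 pointers, one per
  // kernel tap; entries equal to `zero` point at the zero-padding buffer and are
  // therefore not adjusted by input_offset.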
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
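    // Main loop: 8 channels per iteration. For each of the 9 taps, multiply 8 input
    // bytes by the corresponding weights (vmull_s8 widens to int16) and accumulate
    // into two int32x4 accumulators initialized from the per-channel biases.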
    for (; c >= 8; c -= 8) {
      int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
      int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vk0x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vk1x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      const int8x8_t vk2x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      const int8x8_t vk3x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      const int8x8_t vk4x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      const int8x8_t vk5x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      const int8x8_t vk6x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi7x01234567 = vld1_s8(i7); i7 += 8;
      const int8x8_t vk7x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
      const int8x8_t vi8x01234567 = vld1_s8(i8); i8 += 8;
      const int8x8_t vk8x01234567 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);

      vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

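      // Requantize: convert the int32 accumulators to float, apply the per-channel
      // scales stored after the weights, then add the magic bias so the float bit
      // pattern can be reinterpreted as a rounded int32 with the output zero point folded in.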
      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
      const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

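      // Narrow int32 -> int16 -> int8 with saturation, then clamp to the output range.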
#if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);


      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));


      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif  // !XNN_ARCH_ARM64

      vout01234567 = vmax_s8(vout01234567, voutput_min);

      vout01234567 = vmin_s8(vout01234567, voutput_max);

      vst1_s8(output, vout01234567); output += 8;
    }
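    // Remainder: 1-7 channels are left. A full 8-byte group is still loaded and
    // computed (the over-read is permitted by XNN_OOB_READS); only the valid low
    // lanes are stored below.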
    if XNN_UNLIKELY(c != 0) {
      {
        int32x4_t vacc0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
        int32x4_t vacc4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);

        const int8x8_t vi0x01234567 = vld1_s8(i0);
        const int8x8_t vk0x01234567 = vld1_s8(w);

        int16x8_t vprod01234567 = vmull_s8(vi0x01234567, vk0x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi1x01234567 = vld1_s8(i1);
        const int8x8_t vk1x01234567 = vld1_s8((const void*) ((const int8_t*) w + 8));

        vprod01234567 = vmull_s8(vi1x01234567, vk1x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi2x01234567 = vld1_s8(i2);
        const int8x8_t vk2x01234567 = vld1_s8((const void*) ((const int8_t*) w + 16));

        vprod01234567 = vmull_s8(vi2x01234567, vk2x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi3x01234567 = vld1_s8(i3);
        const int8x8_t vk3x01234567 = vld1_s8((const void*) ((const int8_t*) w + 24));

        vprod01234567 = vmull_s8(vi3x01234567, vk3x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi4x01234567 = vld1_s8(i4);
        const int8x8_t vk4x01234567 = vld1_s8((const void*) ((const int8_t*) w + 32));

        vprod01234567 = vmull_s8(vi4x01234567, vk4x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi5x01234567 = vld1_s8(i5);
        const int8x8_t vk5x01234567 = vld1_s8((const void*) ((const int8_t*) w + 40));

        vprod01234567 = vmull_s8(vi5x01234567, vk5x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi6x01234567 = vld1_s8(i6);
        const int8x8_t vk6x01234567 = vld1_s8((const void*) ((const int8_t*) w + 48));

        vprod01234567 = vmull_s8(vi6x01234567, vk6x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi7x01234567 = vld1_s8(i7);
        const int8x8_t vk7x01234567 = vld1_s8((const void*) ((const int8_t*) w + 56));

        vprod01234567 = vmull_s8(vi7x01234567, vk7x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));
        const int8x8_t vi8x01234567 = vld1_s8(i8);
        const int8x8_t vk8x01234567 = vld1_s8((const void*) ((const int8_t*) w + 64));

        vprod01234567 = vmull_s8(vi8x01234567, vk8x01234567);

        vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vprod01234567));
        vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vprod01234567));

        float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
        float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

        const float32x4_t vscale0123 = vld1q_f32((const float*) ((uintptr_t) w + 0 * sizeof(int32_t) + 72 * sizeof(int8_t)));
        const float32x4_t vscale4567 = vld1q_f32((const float*) ((uintptr_t) w + 0 * sizeof(int32_t) + 72 * sizeof(int8_t) + 4 * sizeof(float)));
        vfpacc0123 = vmulq_f32(vfpacc0123, vscale0123);
        vfpacc4567 = vmulq_f32(vfpacc4567, vscale4567);

        vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
        vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

        vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
        vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

#if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif

        int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
        vout01234567 = vmax_s8(vout01234567, voutput_min);
        vout01234567 = vmin_s8(vout01234567, voutput_max);

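        // Store the remaining 1-7 results in 4-, 2-, and 1-byte pieces, rotating the
        // vector after each partial store so the next lanes move to position 0.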
        if (c & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (c & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (c & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}