// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>


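// Multipass QS8 global average pooling microkernel ("7p7x"): the first pass
// accumulates 7 rows of 32-bit partial sums into a scratch buffer, each
// middle pass folds in 7 more rows, and the final pass adds the remaining
// 1-7 rows, requantizes with fp32 arithmetic, and writes the int8 output.
// Channels are processed 16 at a time ("c16") with NEONv8 intrinsics.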
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c16(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int32_t* buffer,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);

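  // First pass: sum rows 0-6 into 32-bit accumulators seeded with the
  // precomputed init_bias term and spill them to the scratch buffer.
  // doz() is a saturating (decrement-or-zero) subtraction, so a trailing
  // group of fewer than 16 channels is still processed once; the
  // XNN_OOB_READS annotation covers the resulting read past the end of
  // each row.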
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 16)) {
    const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
    const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;

    const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
    int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
    const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
    int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);

    const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
    const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
    const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
    const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
    const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
    const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
    const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
    const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
    vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);

    const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
    const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
    const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
    const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));

    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
    vst1q_s32(b, vacc89AB); b += 4;
    vst1q_s32(b, vaccCDEF); b += 4;
  }

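  // Middle passes: while more than 7 rows remain, advance the row pointers
  // and accumulate 7 more rows into the existing buffer contents.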
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 16)) {
      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;

      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
      const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
      int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);

      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
      const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
      vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
      const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
      vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
      const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
      vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
      const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
      vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
      int32x4_t vacc89AB = vld1q_s32(b + 8);
      int32x4_t vaccCDEF = vld1q_s32(b + 12);
      vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
      vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
      vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
      vst1q_s32(b, vacc89AB); b += 4;
      vst1q_s32(b, vaccCDEF); b += 4;
    }
  }

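  // Final pass: between 1 and 7 rows remain. Row pointers that would read
  // past the last input row are redirected to the zero vector, so they
  // contribute nothing to the sums.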
  i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

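  // Load the fp32 requantization parameters: the scale factor, the output
  // zero point, and the output clamping bounds.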
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
  for (; channels >= 16; channels -= 16) {
    const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
    const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;

    const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
    int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
    const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
    int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);

    const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
    const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
    const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
    const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
    const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
    const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
    const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
    const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
    int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
    int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);

    vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
    vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
    vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
    vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));

    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);

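    // Round back to int32 with vcvtnq_s32_f32, the NEONv8 round-to-nearest,
    // ties-to-even conversion (FCVTNS).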
    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);
    vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
    vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);

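    // Saturating narrow int32 -> int16 -> int8; AArch64 packs the high half
    // in place with vqmovn_high_s32/vqmovn_high_s16, other targets combine
    // two 64-bit halves with vcombine.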
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
    #endif  // !XNN_ARCH_ARM64

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);

    #if XNN_ARCH_ARM64
      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
    #else  // !XNN_ARCH_ARM64
      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
    #endif  // !XNN_ARCH_ARM64

    vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);

    vst1q_s8(output, vout0123456789ABCDEF); output += 16;
  }
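  // Remainder: 1-15 leftover channels are processed 8 at a time, with the
  // final partial group stored 4, 2, and 1 bytes at a time.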
  if XNN_UNLIKELY(channels != 0) {
    do {
      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);

      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);

      vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
      vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
      vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

      if XNN_LIKELY(channels >= 8) {
        vst1_s8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
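        // Fewer than 8 channels left: store 4, 2, then 1 bytes as needed,
        // rotating the consumed lanes out with vext.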
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}