// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/unipass-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>

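// Single-pass ("unipass") global average pooling microkernel for signed 8-bit
// inputs: it sums up to 7 rows per output pixel, requantizes the sums to int8
// using float arithmetic (the fp32 variant), and processes 32 channels per
// main-loop iteration (c32). Rows beyond `rows` are read from the shared
// `zero` buffer so the inner loops need no row-count branches.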
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c32(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

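  // Set up one input pointer per row. When fewer than 7 rows are present, the
  // remaining pointers are redirected to the `zero` buffer so the loop body
  // can unconditionally read all 7 rows.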
  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

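  // Broadcast the requantization parameters into vector registers once,
  // outside the channel loop.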
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
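  // Main loop: 32 channels per iteration, held as four 8-lane vectors
  // (lane groups 01234567, 89ABCDEF, GHIJKLMN, OPQRSTUV).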
  for (; channels >= 32; channels -= 32) {
    const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
    const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
    const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
    const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;

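    // Accumulate rows 2..6 into 16-bit sums. Loads are interleaved with the
    // widening adds (vaddl_s8/vaddw_s8) to hide load latency; the sum of 7
    // values in [-128, 127] fits comfortably in int16, so no overflow is
    // possible at this stage.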
    const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
    int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
    const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
    int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
    const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
    int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
    const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
    int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);

    const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
    const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
    const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
    vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
    const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
    vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
    const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
    const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
    const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
    vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
    const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
    vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
    const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
    const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
    const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
    vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
    const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
    vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
    const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
    vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
    const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
    const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
    vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
    const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
    vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
    vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
    vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
    vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
    vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);

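    // Widen the 16-bit sums to 32 bits while adding the precomputed
    // initialization bias (which typically folds -input_zero_point * rows
    // into the accumulator).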
    int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
    int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
    int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
    int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
    int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
    int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
    int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
    int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));

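    // Convert the 32-bit accumulators to float for fp32 requantization.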
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
    float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
    float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
    float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
    float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
    float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
    float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);

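    // Apply the combined requantization scale (which also divides by the
    // pooling size).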
    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
    vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
    vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
    vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
    vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
    vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
    vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);

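    // Round-to-nearest via the "magic bias" trick: adding a large float
    // constant pushes the fractional bits out of the mantissa, leaving the
    // rounded integer in the low bits of the float encoding. Reinterpreting
    // those bits as int32 and saturating-subtracting
    // (magic_bias_bits - output_zero_point) recovers the requantized value
    // with the output zero point already applied; vqsubq_s32 saturates on
    // overflow instead of wrapping.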
    vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
    vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
    vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
    vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
    vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
    vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
    vaccOPQR = vreinterpretq_s32_f32(vaddq_f32(vfpaccOPQR, vmagic_bias));
    vaccSTUV = vreinterpretq_s32_f32(vaddq_f32(vfpaccSTUV, vmagic_bias));

    vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
    vaccOPQR = vqsubq_s32(vaccOPQR, vmagic_bias_less_output_zero_point);
    vaccSTUV = vqsubq_s32(vaccSTUV, vmagic_bias_less_output_zero_point);

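    // Saturating narrow 32 -> 16 -> 8 bits. AArch64 provides _high variants
    // that narrow directly into the upper half of a quad register; on AArch32
    // the halves are narrowed separately and combined.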
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
      int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
      int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
      int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
      int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
    #endif  // !XNN_ARCH_ARM64

    #if XNN_ARCH_ARM64
      int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
      int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
    #else  // !XNN_ARCH_ARM64
      int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
      int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
    #endif  // !XNN_ARCH_ARM64

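    // Clamp to the caller-specified output range, then store all 32 channels.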
    vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
    voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);

    vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);

    vst1q_s8(output, vout0123456789ABCDEF); output += 16;
    vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
  }
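  // Remainder: handle the last 1..31 channels in groups of up to 8. Full
  // 8-byte vectors are loaded even for a partial final group; the XNN_OOB_READS
  // annotation on the function accounts for those out-of-bounds reads.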
  if XNN_UNLIKELY(channels != 0) {
    do {
      const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
      const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
      const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
      int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);

      const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
      const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
      const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
      const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
      vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
      vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);

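      // Same widen / scale / magic-bias requantization as the main loop,
      // applied to a single group of 8 channels.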
      int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
      int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
      vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));

      vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif

      int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
      vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
      vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));

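      // Full groups of 8 are stored directly; the final partial group is
      // written as 4-, 2-, and 1-lane pieces, rotating consumed lanes out
      // with vext_s8 between stores.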
      if XNN_LIKELY(channels >= 8) {
        vst1_s8(output, vout01234567); output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
          vout01234567 = vext_s8(vout01234567, vout01234567, 4);
        }
        if (channels & 2) {
          vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
          vout01234567 = vext_s8(vout01234567, vout01234567, 2);
        }
        if (channels & 1) {
          vst1_lane_s8(output, vout01234567, 0); output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}
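
// A minimal usage sketch, under the assumption that XNNPACK's
// microparams-init helper below exists with this signature; verify against
// xnnpack/microparams-init.h. The helper name, argument order, and the
// example parameter values are assumptions, and `init_bias`/`scale` would
// normally be derived from the quantization parameters and pooling size by
// the calling operator, not hard-coded:
//
//   union xnn_qs8_avgpool_minmax_params params;
//   xnn_init_qs8_avgpool_minmax_fp32_neon_params(
//       &params, /*init_bias=*/-7 /* e.g. -input_zero_point * rows */,
//       /*scale=*/1.0f / 7.0f, /*output_zero_point=*/0,
//       /*output_min=*/-128, /*output_max=*/127);
//   xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c32(
//       /*rows=*/7, /*channels=*/64, input,
//       /*input_stride=*/64 * sizeof(int8_t), zero, output, &params);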