// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>

void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c8(
    size_t rows,
    size_t channels,
    const uint8_t* input,
    size_t input_stride,
    const uint8_t* zero,
    int32_t* buffer,
    uint8_t* output,
    const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

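  // Seven input rows are reduced per pass; set up pointers to the first seven rows.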
  const uint8_t* i0 = input;
  const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
  const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
  const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
  const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
  const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
  const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
  const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);

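  // First pass: widen the 7 rows to uint16, sum them, seed the int32 accumulators with
  // init_bias, and spill them to the scratch buffer, 8 channels per iteration. doz(c, 8) is a
  // saturating decrement, so a trailing partial group of channels still runs one full 8-wide
  // iteration; the over-read is permitted by XNN_OOB_READS.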
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; c != 0; c = doz(c, 8)) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

    const int32x4_t vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_low_u16(vsum01234567)));
    const int32x4_t vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vinit_bias), vget_high_u16(vsum01234567)));

    vst1q_s32(b, vacc0123); b += 4;
    vst1q_s32(b, vacc4567); b += 4;
  }

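  // Intermediate passes: while more than 7 rows remain, advance the row pointers to the next
  // group of 7 rows and accumulate them on top of the buffered partial sums.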
  for (rows -= 7; rows > 7; rows -= 7) {
    i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
    i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
    i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
    i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
    i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
    i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
    i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; c != 0; c = doz(c, 8)) {
      const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
      const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

      const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(b);
      int32x4_t vacc4567 = vld1q_s32(b + 4);
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      vst1q_s32(b, vacc0123); b += 4;
      vst1q_s32(b, vacc4567); b += 4;
    }
  }

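  // Final pass: between 1 and 7 rows remain. Any row pointer past the end of the input is
  // redirected to the all-zero vector so it contributes nothing to the sums.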
  i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
  i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

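  // Parameters for the fp32 requantization path: the fp32 scale, the output zero point, and
  // the output clamping bounds.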
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
  for (; channels >= 8; channels -= 8) {
    const uint8x8_t vi0x01234567 = vld1_u8(i0); i0 += 8;
    const uint8x8_t vi1x01234567 = vld1_u8(i1); i1 += 8;

    const uint8x8_t vi2x01234567 = vld1_u8(i2); i2 += 8;
    uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

    const uint8x8_t vi3x01234567 = vld1_u8(i3); i3 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
    const uint8x8_t vi4x01234567 = vld1_u8(i4); i4 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
    const uint8x8_t vi5x01234567 = vld1_u8(i5); i5 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
    const uint8x8_t vi6x01234567 = vld1_u8(i6); i6 += 8;
    vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
    int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
    int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
    vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

    vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
    vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

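    // Requantize: scale the int32 accumulators in fp32, then round back to int32 with
    // round-to-nearest-even (vcvtnq_s32_f32 lowers to FCVTNS on NEONv8).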
    float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
    float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

    vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
    vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

    vacc0123 = vcvtnq_s32_f32(vfpacc0123);
    vacc4567 = vcvtnq_s32_f32(vfpacc4567);

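    // Saturating narrow to int16, add the output zero point, then saturating narrow to uint8
    // and clamp to [output_min, output_max]. AArch64 fuses the two int16 halves with SQXTN2.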
    #if XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
    #else  // !XNN_ARCH_ARM64
      int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
    #endif  // !XNN_ARCH_ARM64

    vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);

    vout01234567 = vmax_u8(vout01234567, voutput_min);

    vout01234567 = vmin_u8(vout01234567, voutput_max);

    vst1_u8(output, vout01234567); output += 8;
  }
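  // Channel remainder: 1-7 channels are left. The loads still read 8 bytes (allowed by
  // XNN_OOB_READS); only the valid low lanes are stored below.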
  if XNN_UNLIKELY(channels != 0) {
    {
      const uint8x8_t vi0x01234567 = vld1_u8(i0);
      const uint8x8_t vi1x01234567 = vld1_u8(i1);
      const uint8x8_t vi2x01234567 = vld1_u8(i2);
      uint16x8_t vsum01234567 = vaddl_u8(vi0x01234567, vi1x01234567);

      const uint8x8_t vi3x01234567 = vld1_u8(i3);
      vsum01234567 = vaddw_u8(vsum01234567, vi2x01234567);
      const uint8x8_t vi4x01234567 = vld1_u8(i4);
      vsum01234567 = vaddw_u8(vsum01234567, vi3x01234567);
      const uint8x8_t vi5x01234567 = vld1_u8(i5);
      vsum01234567 = vaddw_u8(vsum01234567, vi4x01234567);
      const uint8x8_t vi6x01234567 = vld1_u8(i6);
      vsum01234567 = vaddw_u8(vsum01234567, vi5x01234567);
      int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
      int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
      vsum01234567 = vaddw_u8(vsum01234567, vi6x01234567);

      vacc0123 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc0123), vget_low_u16(vsum01234567)));
      vacc4567 = vreinterpretq_s32_u32(vaddw_u16(vreinterpretq_u32_s32(vacc4567), vget_high_u16(vsum01234567)));

      float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
      float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);

      vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
      vfpacc4567 = vmulq_f32(vfpacc4567, vscale);

      vacc0123 = vcvtnq_s32_f32(vfpacc0123);
      vacc4567 = vcvtnq_s32_f32(vfpacc4567);

      #if XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
      #else  // !XNN_ARCH_ARM64
        int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
      #endif  // !XNN_ARCH_ARM64
      vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);

      uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
      vout01234567 = vmax_u8(vout01234567, voutput_min);
      vout01234567 = vmin_u8(vout01234567, voutput_max);

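      // Store the trailing channels in 4-, 2-, and 1-byte pieces, shifting the consumed lanes
      // out of the vector after each partial store.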
      if (channels & 4) {
        vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
        vout01234567 = vext_u8(vout01234567, vout01234567, 4);
      }
      if (channels & 2) {
        vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
        vout01234567 = vext_u8(vout01234567, vout01234567, 2);
      }
      if (channels & 1) {
        vst1_lane_u8(output, vout01234567, 0);
      }
    }
  }
}