// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gavgpool.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c32(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int8_t* output,
    const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(rows <= 7);
  assert(channels != 0);

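  // Set up the 7 input row pointers. Rows beyond the last valid one are
  // redirected to the zero buffer, so they contribute nothing to the sums.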
  const int8_t* i0 = input;
  const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = zero;
  }
  const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 2) {
    i2 = zero;
  }
  const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
  if XNN_UNPREDICTABLE(rows < 4) {
    i3 = zero;
  }
  const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 4) {
    i4 = zero;
  }
  const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
  if XNN_UNPREDICTABLE(rows < 6) {
    i5 = zero;
  }
  const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
  if XNN_UNPREDICTABLE(rows <= 6) {
    i6 = zero;
  }

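  // Requantization parameters. The fp32 path converts the integer sums to
  // float, scales them, and uses the "magic bias" trick below to round and
  // convert back to integers.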
  const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
  const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
  const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
  const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
  const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
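  // Main loop: sum 7 rows of 32 channels per iteration. Each int8 lane is
  // sign-extended to 16 bits; 7 values of magnitude at most 128 cannot
  // overflow an int16 accumulator.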
  for (; channels >= 32; channels -= 32) {
    const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
    const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
    const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
    const v128_t vxi0xOPQRSTUV = wasm_i16x8_load8x8(i0 + 24);
    i0 += 32;
    const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
    const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
    const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
    const v128_t vxi1xOPQRSTUV = wasm_i16x8_load8x8(i1 + 24);
    i1 += 32;

    v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
    const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
    v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
    const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
    v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
    const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
    v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
    const v128_t vxi2xOPQRSTUV = wasm_i16x8_load8x8(i2 + 24);
    i2 += 32;

    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
    const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
    const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
    const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
    vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
    const v128_t vxi3xOPQRSTUV = wasm_i16x8_load8x8(i3 + 24);
    i3 += 32;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
    const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
    const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
    const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
    vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
    const v128_t vxi4xOPQRSTUV = wasm_i16x8_load8x8(i4 + 24);
    i4 += 32;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
    const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
    const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
    const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
    vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
    const v128_t vxi5xOPQRSTUV = wasm_i16x8_load8x8(i5 + 24);
    i5 += 32;
    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
    const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
    const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
    const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
    vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
    const v128_t vxi6xOPQRSTUV = wasm_i16x8_load8x8(i6 + 24);
    i6 += 32;

    vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
    vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
    vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
    vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);

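    // Sign-extend the 16-bit sums to 32 bits and add the precomputed
    // accumulator bias from the parameters.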
    v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
    v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
    v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
    v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
    v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
    v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
    v128_t vaccOPQR = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccOPQRSTUV));
    v128_t vaccSTUV = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccOPQRSTUV));

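    // Convert the 32-bit sums to float and apply the combined
    // requantization scale.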
    vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
    vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
    vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
    vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
    vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
    vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
    vaccOPQR = wasm_f32x4_convert_i32x4(vaccOPQR);
    vaccSTUV = wasm_f32x4_convert_i32x4(vaccSTUV);

    vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
    vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
    vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
    vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
    vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
    vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
    vaccOPQR = wasm_f32x4_mul(vaccOPQR, vscale);
    vaccSTUV = wasm_f32x4_mul(vaccSTUV, vscale);

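    // Round and convert back to integers via the magic-bias trick: adding
    // vmagic_bias places the rounded value in the low mantissa bits of the
    // float. The integer max against vmagic_min then clamps at the output
    // minimum (positive floats order the same as their bit patterns), and
    // the integer subtraction removes the bias and adds the output zero
    // point.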
    vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
    vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
    vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
    vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
    vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
    vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
    vaccOPQR = wasm_f32x4_add(vaccOPQR, vmagic_bias);
    vaccSTUV = wasm_f32x4_add(vaccSTUV, vmagic_bias);

    vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
    vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
    vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
    vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
    vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
    vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
    vaccOPQR = wasm_i32x4_max(vaccOPQR, vmagic_min);
    vaccSTUV = wasm_i32x4_max(vaccSTUV, vmagic_min);

    vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
    vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
    vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
    vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
    vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
    vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
    vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_output_zero_point);
    vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_output_zero_point);

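    // Narrow 32 -> 16 -> 8 bits with signed saturation.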
    v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
    v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
    v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
    v128_t voutOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);

    v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
    v128_t voutGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);

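    // Clamp at the output maximum; the minimum was already enforced by
    // vmagic_min above.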
    vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
    voutGHIJKLMNOPQRSTUV = wasm_i8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);

    wasm_v128_store(output, vout0123456789ABCDEF);
    wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
    output += 32;
  }
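  // Remainder loop: handle the last 1-31 channels 8 at a time. Loads are
  // always full 8-byte chunks; the XNN_OOB_READS annotation permits reading
  // past the end of the input rows.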
  if XNN_UNLIKELY(channels != 0) {
    do {
      const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
      i0 += 8;
      const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
      i1 += 8;

      v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
      const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
      i2 += 8;

      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
      const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
      i3 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
      const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
      i4 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
      const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
      i5 += 8;
      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
      const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
      i6 += 8;

      vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);

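      // Requantize the 8 sums exactly as in the main loop.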
      v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
      v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));

      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);

      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);

      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);

      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);

      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);

      const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
      vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);

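      // Store 8 output bytes, or unpack a partial final group with
      // 4-, 2-, and 1-byte stores.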
      if XNN_LIKELY(channels >= 8) {
        *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
        output += 8;
        channels -= 8;
      } else {
        if (channels & 4) {
          *((float*) output) = wasm_f32x4_extract_lane(vout0123456701234567, 0);
          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = wasm_i32x4_extract_lane(vout0123456701234567, 0);
        if (channels & 2) {
          *((uint16_t*) output) = (uint16_t) vout0123;
          vout0123 >>= 16;
          output += 2;
        }
        if (channels & 1) {
          *output = (int8_t) vout0123;
          output += 1;
        }
        channels = 0;
      }
    } while (channels != 0);
  }
}