// Auto-generated file. Do not edit!
//   Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c32(size_t rows,size_t channels,const uint8_t * input,size_t input_stride,const uint8_t * zero,int32_t * buffer,uint8_t * output,const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])18 void xnn_qu8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c32(
19     size_t rows,
20     size_t channels,
21     const uint8_t* input,
22     size_t input_stride,
23     const uint8_t* zero,
24     int32_t* buffer,
25     uint8_t* output,
26     const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
27 {
28   assert(rows > 7);
29   assert(channels != 0);
30 
31   const uint8_t* i0 = input;
32   const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride);
33   const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride);
34   const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride);
35   const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride);
36   const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride);
37   const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride);
38   const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(uint8_t);
39 
40   const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
41   int32_t* b = buffer;
42   size_t c = channels;
43   for (; c >= 32; c -= 32) {
44     const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
45     const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
46     const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
47     const v128_t vxi0xOPQRSTUV = wasm_u16x8_load8x8(i0 + 24);
48     i0 += 32;
49     const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
50     const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
51     const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
52     const v128_t vxi1xOPQRSTUV = wasm_u16x8_load8x8(i1 + 24);
53     i1 += 32;
54 
55     v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
56     const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
57     v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
58     const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
59     v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
60     const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
61     v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
62     const v128_t vxi2xOPQRSTUV = wasm_u16x8_load8x8(i2 + 24);
63     i2 += 32;
64 
65     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
66     const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
67     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
68     const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
69     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
70     const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
71     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
72     const v128_t vxi3xOPQRSTUV = wasm_u16x8_load8x8(i3 + 24);
73     i3 += 32;
74     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
75     const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
76     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
77     const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
78     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
79     const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
80     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
81     const v128_t vxi4xOPQRSTUV = wasm_u16x8_load8x8(i4 + 24);
82     i4 += 32;
83     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
84     const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
85     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
86     const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
87     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
88     const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
89     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
90     const v128_t vxi5xOPQRSTUV = wasm_u16x8_load8x8(i5 + 24);
91     i5 += 32;
92     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
93     const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
94     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
95     const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
96     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
97     const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
98     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
99     const v128_t vxi6xOPQRSTUV = wasm_u16x8_load8x8(i6 + 24);
100     i6 += 32;
101 
102     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
103     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
104     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
105     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);
106 
107     const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
108     const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
109     const v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
110     const v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
111     const v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
112     const v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));
113     const v128_t vaccOPQR = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vaccOPQRSTUV));
114     const v128_t vaccSTUV = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vaccOPQRSTUV));
115 
116     wasm_v128_store(b, vacc0123);
117     wasm_v128_store(b + 4, vacc4567);
118     wasm_v128_store(b + 8, vacc89AB);
119     wasm_v128_store(b + 12, vaccCDEF);
120     wasm_v128_store(b + 16, vaccGHIJ);
121     wasm_v128_store(b + 20, vaccKLMN);
122     wasm_v128_store(b + 24, vaccOPQR);
123     wasm_v128_store(b + 28, vaccSTUV);
124     b += 32;
125   }
126   if XNN_UNLIKELY(c != 0) {
127     do {
128       const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
129       i0 += 8;
130       const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
131       i1 += 8;
132 
133       v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
134       const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
135       i2 += 8;
136 
137       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
138       const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
139       i3 += 8;
140       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
141       const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
142       i4 += 8;
143       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
144       const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
145       i5 += 8;
146       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
147       const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
148       i6 += 8;
149 
150       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
151 
152       const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_low_u16x8(vacc01234567));
153       const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_u32x4_extend_high_u16x8(vacc01234567));
154 
155       wasm_v128_store(b, vacc0123);
156       wasm_v128_store(b + 4, vacc4567);
157       b += 8;
158 
159       c = doz(c, 8);
160     } while (c != 0);
161   }
162 
163   for (rows -= 7; rows > 7; rows -= 7) {
164     i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
165     i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
166     i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
167     i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
168     i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
169     i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
170     i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
171 
172     int32_t* b = buffer;
173     size_t c = channels;
174     for (; c >= 32; c -= 32) {
175       const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
176       const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
177       const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
178       const v128_t vxi0xOPQRSTUV = wasm_u16x8_load8x8(i0 + 24);
179       i0 += 32;
180       const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
181       const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
182       const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
183       const v128_t vxi1xOPQRSTUV = wasm_u16x8_load8x8(i1 + 24);
184       i1 += 32;
185 
186       v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
187       const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
188       v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
189       const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
190       v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
191       const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
192       v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
193       const v128_t vxi2xOPQRSTUV = wasm_u16x8_load8x8(i2 + 24);
194       i2 += 32;
195 
196       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
197       const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
198       vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
199       const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
200       vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
201       const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
202       vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
203       const v128_t vxi3xOPQRSTUV = wasm_u16x8_load8x8(i3 + 24);
204       i3 += 32;
205       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
206       const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
207       vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
208       const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
209       vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
210       const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
211       vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
212       const v128_t vxi4xOPQRSTUV = wasm_u16x8_load8x8(i4 + 24);
213       i4 += 32;
214       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
215       const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
216       vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
217       const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
218       vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
219       const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
220       vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
221       const v128_t vxi5xOPQRSTUV = wasm_u16x8_load8x8(i5 + 24);
222       i5 += 32;
223       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
224       const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
225       vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
226       const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
227       vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
228       const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
229       vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
230       const v128_t vxi6xOPQRSTUV = wasm_u16x8_load8x8(i6 + 24);
231       i6 += 32;
232 
233       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
234       vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
235       vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
236       vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);
237 
238       v128_t vacc0123 = wasm_v128_load(b);
239       v128_t vacc4567 = wasm_v128_load(b + 4);
240       v128_t vacc89AB = wasm_v128_load(b + 8);
241       v128_t vaccCDEF = wasm_v128_load(b + 12);
242       v128_t vaccGHIJ = wasm_v128_load(b + 16);
243       v128_t vaccKLMN = wasm_v128_load(b + 20);
244       v128_t vaccOPQR = wasm_v128_load(b + 24);
245       v128_t vaccSTUV = wasm_v128_load(b + 28);
246 
247       vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
248       vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
249       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
250       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
251       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
252       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));
253       vaccOPQR = wasm_i32x4_add(vaccOPQR, wasm_u32x4_extend_low_u16x8(vaccOPQRSTUV));
254       vaccSTUV = wasm_i32x4_add(vaccSTUV, wasm_u32x4_extend_high_u16x8(vaccOPQRSTUV));
255 
256       wasm_v128_store(b, vacc0123);
257       wasm_v128_store(b + 4, vacc4567);
258       wasm_v128_store(b + 8, vacc89AB);
259       wasm_v128_store(b + 12, vaccCDEF);
260       wasm_v128_store(b + 16, vaccGHIJ);
261       wasm_v128_store(b + 20, vaccKLMN);
262       wasm_v128_store(b + 24, vaccOPQR);
263       wasm_v128_store(b + 28, vaccSTUV);
264       b += 32;
265     }
266     if XNN_UNLIKELY(c != 0) {
267       do {
268         const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
269         i0 += 8;
270         const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
271         i1 += 8;
272 
273         v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
274         const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
275         i2 += 8;
276 
277         vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
278         const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
279         i3 += 8;
280         vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
281         const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
282         i4 += 8;
283         vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
284         const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
285         i5 += 8;
286         vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
287         const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
288         i6 += 8;
289 
290         vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
291 
292         v128_t vacc0123 = wasm_v128_load(b);
293         v128_t vacc4567 = wasm_v128_load(b + 4);
294 
295         vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
296         vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
297 
298         wasm_v128_store(b, vacc0123);
299         wasm_v128_store(b + 4, vacc4567);
300         b += 8;
301 
302         c = doz(c, 8);
303       } while (c != 0);
304     }
305   }
306 
307   i0 = (const uint8_t*) ((uintptr_t) i0 + input_increment);
308   i1 = (const uint8_t*) ((uintptr_t) i1 + input_increment);
309   if XNN_UNPREDICTABLE(rows < 2) {
310     i1 = zero;
311   }
312   i2 = (const uint8_t*) ((uintptr_t) i2 + input_increment);
313   if XNN_UNPREDICTABLE(rows <= 2) {
314     i2 = zero;
315   }
316   i3 = (const uint8_t*) ((uintptr_t) i3 + input_increment);
317   if XNN_UNPREDICTABLE(rows < 4) {
318     i3 = zero;
319   }
320   i4 = (const uint8_t*) ((uintptr_t) i4 + input_increment);
321   if XNN_UNPREDICTABLE(rows <= 4) {
322     i4 = zero;
323   }
324   i5 = (const uint8_t*) ((uintptr_t) i5 + input_increment);
325   if XNN_UNPREDICTABLE(rows < 6) {
326     i5 = zero;
327   }
328   i6 = (const uint8_t*) ((uintptr_t) i6 + input_increment);
329   if XNN_UNPREDICTABLE(rows <= 6) {
330     i6 = zero;
331   }
332 
333   const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
334   const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
335   const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
336   const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
337   const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
338   for (; channels >= 32; channels -= 32) {
339     const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
340     const v128_t vxi0x89ABCDEF = wasm_u16x8_load8x8(i0 + 8);
341     const v128_t vxi0xGHIJKLMN = wasm_u16x8_load8x8(i0 + 16);
342     const v128_t vxi0xOPQRSTUV = wasm_u16x8_load8x8(i0 + 24);
343     i0 += 32;
344     const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
345     const v128_t vxi1x89ABCDEF = wasm_u16x8_load8x8(i1 + 8);
346     const v128_t vxi1xGHIJKLMN = wasm_u16x8_load8x8(i1 + 16);
347     const v128_t vxi1xOPQRSTUV = wasm_u16x8_load8x8(i1 + 24);
348     i1 += 32;
349 
350     v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
351     const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
352     v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
353     const v128_t vxi2x89ABCDEF = wasm_u16x8_load8x8(i2 + 8);
354     v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
355     const v128_t vxi2xGHIJKLMN = wasm_u16x8_load8x8(i2 + 16);
356     v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
357     const v128_t vxi2xOPQRSTUV = wasm_u16x8_load8x8(i2 + 24);
358     i2 += 32;
359 
360     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
361     const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
362     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
363     const v128_t vxi3x89ABCDEF = wasm_u16x8_load8x8(i3 + 8);
364     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
365     const v128_t vxi3xGHIJKLMN = wasm_u16x8_load8x8(i3 + 16);
366     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
367     const v128_t vxi3xOPQRSTUV = wasm_u16x8_load8x8(i3 + 24);
368     i3 += 32;
369     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
370     const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
371     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
372     const v128_t vxi4x89ABCDEF = wasm_u16x8_load8x8(i4 + 8);
373     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
374     const v128_t vxi4xGHIJKLMN = wasm_u16x8_load8x8(i4 + 16);
375     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
376     const v128_t vxi4xOPQRSTUV = wasm_u16x8_load8x8(i4 + 24);
377     i4 += 32;
378     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
379     const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
380     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
381     const v128_t vxi5x89ABCDEF = wasm_u16x8_load8x8(i5 + 8);
382     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
383     const v128_t vxi5xGHIJKLMN = wasm_u16x8_load8x8(i5 + 16);
384     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
385     const v128_t vxi5xOPQRSTUV = wasm_u16x8_load8x8(i5 + 24);
386     i5 += 32;
387     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
388     const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
389     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
390     const v128_t vxi6x89ABCDEF = wasm_u16x8_load8x8(i6 + 8);
391     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
392     const v128_t vxi6xGHIJKLMN = wasm_u16x8_load8x8(i6 + 16);
393     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
394     const v128_t vxi6xOPQRSTUV = wasm_u16x8_load8x8(i6 + 24);
395     i6 += 32;
396 
397     vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
398     vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
399     vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
400     vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);
401 
402     v128_t vacc0123 = wasm_v128_load(buffer);
403     v128_t vacc4567 = wasm_v128_load(buffer + 4);
404     v128_t vacc89AB = wasm_v128_load(buffer + 8);
405     v128_t vaccCDEF = wasm_v128_load(buffer + 12);
406     v128_t vaccGHIJ = wasm_v128_load(buffer + 16);
407     v128_t vaccKLMN = wasm_v128_load(buffer + 20);
408     v128_t vaccOPQR = wasm_v128_load(buffer + 24);
409     v128_t vaccSTUV = wasm_v128_load(buffer + 28);
410     buffer += 32;
411 
412     vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
413     vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
414     vacc89AB = wasm_i32x4_add(vacc89AB, wasm_u32x4_extend_low_u16x8(vacc89ABCDEF));
415     vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_u32x4_extend_high_u16x8(vacc89ABCDEF));
416     vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_u32x4_extend_low_u16x8(vaccGHIJKLMN));
417     vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_u32x4_extend_high_u16x8(vaccGHIJKLMN));
418     vaccOPQR = wasm_i32x4_add(vaccOPQR, wasm_u32x4_extend_low_u16x8(vaccOPQRSTUV));
419     vaccSTUV = wasm_i32x4_add(vaccSTUV, wasm_u32x4_extend_high_u16x8(vaccOPQRSTUV));
420 
421     vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
422     vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
423     vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
424     vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
425     vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
426     vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
427     vaccOPQR = wasm_f32x4_convert_i32x4(vaccOPQR);
428     vaccSTUV = wasm_f32x4_convert_i32x4(vaccSTUV);
429 
430     vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
431     vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
432     vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
433     vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
434     vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
435     vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
436     vaccOPQR = wasm_f32x4_mul(vaccOPQR, vscale);
437     vaccSTUV = wasm_f32x4_mul(vaccSTUV, vscale);
438 
439     vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
440     vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
441     vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
442     vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
443     vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
444     vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
445     vaccOPQR = wasm_f32x4_add(vaccOPQR, vmagic_bias);
446     vaccSTUV = wasm_f32x4_add(vaccSTUV, vmagic_bias);
447 
448     vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
449     vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
450     vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
451     vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
452     vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
453     vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
454     vaccOPQR = wasm_i32x4_max(vaccOPQR, vmagic_min);
455     vaccSTUV = wasm_i32x4_max(vaccSTUV, vmagic_min);
456 
457     vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
458     vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
459     vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
460     vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
461     vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
462     vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
463     vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_output_zero_point);
464     vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_output_zero_point);
465 
466     v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
467     v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
468     v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
469     v128_t voutOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
470 
471     v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
472     v128_t voutGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
473 
474     vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
475     voutGHIJKLMNOPQRSTUV = wasm_u8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
476 
477     wasm_v128_store(output, vout0123456789ABCDEF);
478     wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
479     output += 32;
480   }
481   if XNN_UNLIKELY(channels != 0) {
482     do {
483       const v128_t vxi0x01234567 = wasm_u16x8_load8x8(i0);
484       i0 += 8;
485       const v128_t vxi1x01234567 = wasm_u16x8_load8x8(i1);
486       i1 += 8;
487 
488       v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
489       const v128_t vxi2x01234567 = wasm_u16x8_load8x8(i2);
490       i2 += 8;
491 
492       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
493       const v128_t vxi3x01234567 = wasm_u16x8_load8x8(i3);
494       i3 += 8;
495       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
496       const v128_t vxi4x01234567 = wasm_u16x8_load8x8(i4);
497       i4 += 8;
498       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
499       const v128_t vxi5x01234567 = wasm_u16x8_load8x8(i5);
500       i5 += 8;
501       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
502       const v128_t vxi6x01234567 = wasm_u16x8_load8x8(i6);
503       i6 += 8;
504 
505       vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
506 
507       v128_t vacc0123 = wasm_v128_load(buffer);
508       v128_t vacc4567 = wasm_v128_load(buffer + 4);
509       buffer += 8;
510 
511       vacc0123 = wasm_i32x4_add(vacc0123, wasm_u32x4_extend_low_u16x8(vacc01234567));
512       vacc4567 = wasm_i32x4_add(vacc4567, wasm_u32x4_extend_high_u16x8(vacc01234567));
513 
514       vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
515       vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
516 
517       vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
518       vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
519 
520       vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
521       vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
522 
523       vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
524       vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
525 
526       vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
527       vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
528 
529       const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
530       v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
531       vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
532 
533       if XNN_LIKELY(channels >= 8) {
534         *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
535         output += 8;
536         channels -= 8;
537       } else {
538         if (channels & 4) {
539           *((float*) output) = wasm_f32x4_extract_lane(vout0123456701234567, 0);
540           vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
541           output += 4;
542         }
543         uint32_t vout0123 = wasm_i32x4_extract_lane(vout0123456701234567, 0);
544         if (channels & 2) {
545           *((uint16_t*) output) = (uint16_t) vout0123;
546           vout0123 >>= 16;
547           output += 2;
548         }
549         if (channels & 1) {
550           *output = (uint8_t) vout0123;
551           output += 1;
552         }
553         channels = 0;
554       }
555     } while (channels != 0);
556   }
557 }
558