// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-wasmsimd-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>


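// QS8 depthwise-convolution microkernel: 25 kernel taps, up to 8 channels per
// loop iteration, a single pass over the packed weights, WAsm SIMD with 16-bit
// multiplies whose products are added pairwise before widening (mul16+add16),
// and fp32 ("magic bias") requantization of the int32 accumulators. Each
// iteration of the outer loop produces one output pixel.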
void xnn_qs8_dwconv_minmax_fp32_ukernel_up8x25__wasmsimd_mul16_add16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

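  // Each iteration of this loop produces one output pixel: it gathers the 25
  // input-row pointers for that pixel, then accumulates across all channels.
  // Pointers that reference the shared `zero` buffer are left untouched; all
  // others are adjusted by `input_offset`.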
  do {
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
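    // Main loop: 8 channels per iteration. The packed weights consist of
    // 8 int32 bias values followed by 25 groups of 8 int8 kernel taps, so the
    // per-iteration stride over `w` is 8 * sizeof(int32_t) + 200 * sizeof(int8_t).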
    for (; c >= 8; c -= 8) {
      v128_t vacc0123 = wasm_v128_load(w);
      v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));


      const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
      const v128_t vk0x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      i0 += 8;

      v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);


      const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
      const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      i1 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi1x01234567, vk1x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
      const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      i2 += 8;

      vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);


      const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
      const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      i3 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi3x01234567, vk3x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
      const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      i4 += 8;

      vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);


      const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
      const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      i5 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi5x01234567, vk5x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
      const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      i6 += 8;

      vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);


      const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
      const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      i7 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi7x01234567, vk7x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
      const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      i8 += 8;

      vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);


      const v128_t vi9x01234567 = wasm_i16x8_load8x8(i9);
      const v128_t vk9x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));
      i9 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi9x01234567, vk9x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi10x01234567 = wasm_i16x8_load8x8(i10);
      const v128_t vk10x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t)));
      i10 += 8;

      vprod01234567 = wasm_i16x8_mul(vi10x01234567, vk10x01234567);


      const v128_t vi11x01234567 = wasm_i16x8_load8x8(i11);
      const v128_t vk11x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t)));
      i11 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi11x01234567, vk11x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);
      const v128_t vk12x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t)));
      i12 += 8;

      vprod01234567 = wasm_i16x8_mul(vi12x01234567, vk12x01234567);


      const v128_t vi13x01234567 = wasm_i16x8_load8x8(i13);
      const v128_t vk13x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t)));
      i13 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi13x01234567, vk13x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi14x01234567 = wasm_i16x8_load8x8(i14);
      const v128_t vk14x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t)));
      i14 += 8;

      vprod01234567 = wasm_i16x8_mul(vi14x01234567, vk14x01234567);


      const v128_t vi15x01234567 = wasm_i16x8_load8x8(i15);
      const v128_t vk15x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t)));
      i15 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi15x01234567, vk15x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi16x01234567 = wasm_i16x8_load8x8(i16);
      const v128_t vk16x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t)));
      i16 += 8;

      vprod01234567 = wasm_i16x8_mul(vi16x01234567, vk16x01234567);


      const v128_t vi17x01234567 = wasm_i16x8_load8x8(i17);
      const v128_t vk17x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t)));
      i17 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi17x01234567, vk17x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi18x01234567 = wasm_i16x8_load8x8(i18);
      const v128_t vk18x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t)));
      i18 += 8;

      vprod01234567 = wasm_i16x8_mul(vi18x01234567, vk18x01234567);


      const v128_t vi19x01234567 = wasm_i16x8_load8x8(i19);
      const v128_t vk19x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t)));
      i19 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi19x01234567, vk19x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi20x01234567 = wasm_i16x8_load8x8(i20);
      const v128_t vk20x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t)));
      i20 += 8;

      vprod01234567 = wasm_i16x8_mul(vi20x01234567, vk20x01234567);


      const v128_t vi21x01234567 = wasm_i16x8_load8x8(i21);
      const v128_t vk21x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t)));
      i21 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi21x01234567, vk21x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi22x01234567 = wasm_i16x8_load8x8(i22);
      const v128_t vk22x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t)));
      i22 += 8;

      vprod01234567 = wasm_i16x8_mul(vi22x01234567, vk22x01234567);


      const v128_t vi23x01234567 = wasm_i16x8_load8x8(i23);
      const v128_t vk23x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t)));
      i23 += 8;

      vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi23x01234567, vk23x01234567));

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

      const v128_t vi24x01234567 = wasm_i16x8_load8x8(i24);
      const v128_t vk24x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t)));
      i24 += 8;

      vprod01234567 = wasm_i16x8_mul(vi24x01234567, vk24x01234567);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));


      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 200 * sizeof(int8_t));

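      // Requantize: convert the int32 accumulators to float, apply the
      // per-tensor scale, add a "magic bias" so rounding happens in the low
      // bits, clamp against the minimum as int32, subtract
      // (magic bias - output zero point), then narrow to int16 and int8 and
      // clamp against the maximum.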
      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);

      const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);

      const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);

      const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);

      const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);

      v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);

      v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);

      const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
      vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);

      *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
      output += 8;
    }
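    // Remainder: handle the last 1-7 channels. The same 25-tap accumulation
    // runs once more over a full 8 lanes (hence the XNN_OOB_READS annotation
    // on the function), and the stores below write exactly `c` bytes.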
    if XNN_UNLIKELY(c != 0) {
      {
        v128_t vacc0123 = wasm_v128_load(w);
        v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));


        const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
        const v128_t vk0x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));

        v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);


        const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
        const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi1x01234567, vk1x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
        const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);


        const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
        const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi3x01234567, vk3x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
        const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);


        const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
        const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi5x01234567, vk5x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
        const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);


        const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
        const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi7x01234567, vk7x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
        const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);


        const v128_t vi9x01234567 = wasm_i16x8_load8x8(i9);
        const v128_t vk9x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi9x01234567, vk9x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi10x01234567 = wasm_i16x8_load8x8(i10);
        const v128_t vk10x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 80 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi10x01234567, vk10x01234567);


        const v128_t vi11x01234567 = wasm_i16x8_load8x8(i11);
        const v128_t vk11x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 88 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi11x01234567, vk11x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi12x01234567 = wasm_i16x8_load8x8(i12);
        const v128_t vk12x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 96 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi12x01234567, vk12x01234567);


        const v128_t vi13x01234567 = wasm_i16x8_load8x8(i13);
        const v128_t vk13x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 104 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi13x01234567, vk13x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi14x01234567 = wasm_i16x8_load8x8(i14);
        const v128_t vk14x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 112 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi14x01234567, vk14x01234567);


        const v128_t vi15x01234567 = wasm_i16x8_load8x8(i15);
        const v128_t vk15x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 120 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi15x01234567, vk15x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi16x01234567 = wasm_i16x8_load8x8(i16);
        const v128_t vk16x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 128 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi16x01234567, vk16x01234567);


        const v128_t vi17x01234567 = wasm_i16x8_load8x8(i17);
        const v128_t vk17x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 136 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi17x01234567, vk17x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi18x01234567 = wasm_i16x8_load8x8(i18);
        const v128_t vk18x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 144 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi18x01234567, vk18x01234567);


        const v128_t vi19x01234567 = wasm_i16x8_load8x8(i19);
        const v128_t vk19x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 152 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi19x01234567, vk19x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi20x01234567 = wasm_i16x8_load8x8(i20);
        const v128_t vk20x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 160 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi20x01234567, vk20x01234567);


        const v128_t vi21x01234567 = wasm_i16x8_load8x8(i21);
        const v128_t vk21x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 168 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi21x01234567, vk21x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi22x01234567 = wasm_i16x8_load8x8(i22);
        const v128_t vk22x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 176 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi22x01234567, vk22x01234567);


        const v128_t vi23x01234567 = wasm_i16x8_load8x8(i23);
        const v128_t vk23x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 184 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi23x01234567, vk23x01234567));

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi24x01234567 = wasm_i16x8_load8x8(i24);
        const v128_t vk24x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 192 * sizeof(int8_t)));

        vprod01234567 = wasm_i16x8_mul(vi24x01234567, vk24x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));



      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);

      const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);

      const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);

      const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);

      const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);

      v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);

      const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
      vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);

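      // Store the remaining channels 4, 2, and 1 bytes at a time, shifting
      // the already-written lanes out of the vector between stores.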
      if (c & 4) {
        *((float*) output) = wasm_f32x4_extract_lane(vout0123456701234567, 0);
        vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
        output += 4;
      }
      uint32_t vout0123 = wasm_i32x4_extract_lane(vout0123456701234567, 0);
      if (c & 2) {
        *((uint16_t*) output) = (uint16_t) vout0123;
        vout0123 >>= 16;
        output += 2;
      }
      if (c & 1) {
        *output = (int8_t) vout0123;
        output += 1;
      }
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}