// xref: /aosp_15_r20/external/XNNPACK/src/qc8-dwconv/gen/up24x9-minmax-fp32-wasmsimd-mul16-add16.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-wasmsimd-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>

16 
xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16(size_t channels,size_t output_width,const int8_t ** input,const void * weights,int8_t * output,size_t input_stride,size_t output_increment,size_t input_offset,const int8_t * zero,const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])17 void xnn_qc8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16_add16(
18     size_t channels,
19     size_t output_width,
20     const int8_t** input,
21     const void* weights,
22     int8_t* output,
23     size_t input_stride,
24     size_t output_increment,
25     size_t input_offset,
26     const int8_t* zero,
27     const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
28 {
29   assert(channels != 0);
30   assert(output_width != 0);
31 
32   do {
33     const int8_t* i0 = input[0];
34     assert(i0 != NULL);
35     if XNN_UNPREDICTABLE(i0 != zero) {
36       i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
37     }
38     const int8_t* i1 = input[1];
39     assert(i1 != NULL);
40     if XNN_UNPREDICTABLE(i1 != zero) {
41       i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
42     }
43     const int8_t* i2 = input[2];
44     assert(i2 != NULL);
45     if XNN_UNPREDICTABLE(i2 != zero) {
46       i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
47     }
48     const int8_t* i3 = input[3];
49     assert(i3 != NULL);
50     if XNN_UNPREDICTABLE(i3 != zero) {
51       i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
52     }
53     const int8_t* i4 = input[4];
54     assert(i4 != NULL);
55     if XNN_UNPREDICTABLE(i4 != zero) {
56       i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
57     }
58     const int8_t* i5 = input[5];
59     assert(i5 != NULL);
60     if XNN_UNPREDICTABLE(i5 != zero) {
61       i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
62     }
63     const int8_t* i6 = input[6];
64     assert(i6 != NULL);
65     if XNN_UNPREDICTABLE(i6 != zero) {
66       i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
67     }
68     const int8_t* i7 = input[7];
69     assert(i7 != NULL);
70     if XNN_UNPREDICTABLE(i7 != zero) {
71       i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
72     }
73     const int8_t* i8 = input[8];
74     assert(i8 != NULL);
75     if XNN_UNPREDICTABLE(i8 != zero) {
76       i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
77     }
78     input = (const int8_t**) ((uintptr_t) input + input_stride);
79 
80     size_t c = channels;
81     const void* w = weights;
82     for (; c >= 24; c -= 24) {
83       v128_t vacc0123 = wasm_v128_load(w);
84       v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));
85       v128_t vacc89AB = wasm_v128_load((const void*) ((uintptr_t) w + 8 * sizeof(int32_t)));
86       v128_t vaccCDEF = wasm_v128_load((const void*) ((uintptr_t) w + 12 * sizeof(int32_t)));
87       v128_t vaccGHIJ = wasm_v128_load((const void*) ((uintptr_t) w + 16 * sizeof(int32_t)));
88       v128_t vaccKLMN = wasm_v128_load((const void*) ((uintptr_t) w + 20 * sizeof(int32_t)));
89 
90 
91       const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
92       const v128_t vk0x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
93       const v128_t vi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
94       const v128_t vk0x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
95       const v128_t vi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
96       const v128_t vk0xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
97       i0 += 24;
98 
99       v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);
100       v128_t vprod89ABCDEF = wasm_i16x8_mul(vi0x89ABCDEF, vk0x89ABCDEF);
101       v128_t vprodGHIJKLMN = wasm_i16x8_mul(vi0xGHIJKLMN, vk0xGHIJKLMN);
102 
103 
104       const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
105       const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
106       const v128_t vi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
107       const v128_t vk1x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
108       const v128_t vi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
109       const v128_t vk1xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
110       i1 += 24;
111 
112       vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi1x01234567, vk1x01234567));
113       vprod89ABCDEF = wasm_i16x8_add(vprod89ABCDEF, wasm_i16x8_mul(vi1x89ABCDEF, vk1x89ABCDEF));
114       vprodGHIJKLMN = wasm_i16x8_add(vprodGHIJKLMN, wasm_i16x8_mul(vi1xGHIJKLMN, vk1xGHIJKLMN));
115 
116       vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
117       vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
118       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
119       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
120       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
121       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));
122 
123       const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
124       const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
125       const v128_t vi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
126       const v128_t vk2x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
127       const v128_t vi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
128       const v128_t vk2xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
129       i2 += 24;
130 
131       vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);
132       vprod89ABCDEF = wasm_i16x8_mul(vi2x89ABCDEF, vk2x89ABCDEF);
133       vprodGHIJKLMN = wasm_i16x8_mul(vi2xGHIJKLMN, vk2xGHIJKLMN);
134 
135 
136       const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
137       const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
138       const v128_t vi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
139       const v128_t vk3x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
140       const v128_t vi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
141       const v128_t vk3xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
142       i3 += 24;
143 
144       vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi3x01234567, vk3x01234567));
145       vprod89ABCDEF = wasm_i16x8_add(vprod89ABCDEF, wasm_i16x8_mul(vi3x89ABCDEF, vk3x89ABCDEF));
146       vprodGHIJKLMN = wasm_i16x8_add(vprodGHIJKLMN, wasm_i16x8_mul(vi3xGHIJKLMN, vk3xGHIJKLMN));
147 
148       vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
149       vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
150       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
151       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
152       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
153       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));
154 
155       const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
156       const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
157       const v128_t vi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
158       const v128_t vk4x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
159       const v128_t vi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
160       const v128_t vk4xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
161       i4 += 24;
162 
163       vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);
164       vprod89ABCDEF = wasm_i16x8_mul(vi4x89ABCDEF, vk4x89ABCDEF);
165       vprodGHIJKLMN = wasm_i16x8_mul(vi4xGHIJKLMN, vk4xGHIJKLMN);
166 
167 
168       const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
169       const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
170       const v128_t vi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
171       const v128_t vk5x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
172       const v128_t vi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
173       const v128_t vk5xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
174       i5 += 24;
175 
176       vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi5x01234567, vk5x01234567));
177       vprod89ABCDEF = wasm_i16x8_add(vprod89ABCDEF, wasm_i16x8_mul(vi5x89ABCDEF, vk5x89ABCDEF));
178       vprodGHIJKLMN = wasm_i16x8_add(vprodGHIJKLMN, wasm_i16x8_mul(vi5xGHIJKLMN, vk5xGHIJKLMN));
179 
180       vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
181       vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
182       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
183       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
184       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
185       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));
186 
187       const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
188       const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
189       const v128_t vi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
190       const v128_t vk6x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
191       const v128_t vi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
192       const v128_t vk6xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
193       i6 += 24;
194 
195       vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);
196       vprod89ABCDEF = wasm_i16x8_mul(vi6x89ABCDEF, vk6x89ABCDEF);
197       vprodGHIJKLMN = wasm_i16x8_mul(vi6xGHIJKLMN, vk6xGHIJKLMN);
198 
199 
200       const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
201       const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
202       const v128_t vi7x89ABCDEF = wasm_i16x8_load8x8(i7 + 8);
203       const v128_t vk7x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
204       const v128_t vi7xGHIJKLMN = wasm_i16x8_load8x8(i7 + 16);
205       const v128_t vk7xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
206       i7 += 24;
207 
208       vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi7x01234567, vk7x01234567));
209       vprod89ABCDEF = wasm_i16x8_add(vprod89ABCDEF, wasm_i16x8_mul(vi7x89ABCDEF, vk7x89ABCDEF));
210       vprodGHIJKLMN = wasm_i16x8_add(vprodGHIJKLMN, wasm_i16x8_mul(vi7xGHIJKLMN, vk7xGHIJKLMN));
211 
212       vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
213       vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
214       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
215       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
216       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
217       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));
218 
219       const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
220       const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
221       const v128_t vi8x89ABCDEF = wasm_i16x8_load8x8(i8 + 8);
222       const v128_t vk8x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
223       const v128_t vi8xGHIJKLMN = wasm_i16x8_load8x8(i8 + 16);
224       const v128_t vk8xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
225       i8 += 24;
226 
227       vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);
228       vprod89ABCDEF = wasm_i16x8_mul(vi8x89ABCDEF, vk8x89ABCDEF);
229       vprodGHIJKLMN = wasm_i16x8_mul(vi8xGHIJKLMN, vk8xGHIJKLMN);
230 
231       vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
232       vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
233       vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
234       vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
235       vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
236       vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));
237 
238 
239       w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
240 
241       vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
242       vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
243       vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
244       vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
245       vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
246       vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
247 
248       const v128_t vscale0123 = wasm_v128_load(w);
249       const v128_t vscale4567 = wasm_v128_load((const float*) w + 4);
250       const v128_t vscale89AB = wasm_v128_load((const float*) w + 8);
251       const v128_t vscaleCDEF = wasm_v128_load((const float*) w + 12);
252       const v128_t vscaleGHIJ = wasm_v128_load((const float*) w + 16);
253       const v128_t vscaleKLMN = wasm_v128_load((const float*) w + 20);
254       w = (const void*) ((const float*) w + 24);
255 
256       vacc0123 = wasm_f32x4_mul(vacc0123, vscale0123);
257       vacc4567 = wasm_f32x4_mul(vacc4567, vscale4567);
258       vacc89AB = wasm_f32x4_mul(vacc89AB, vscale89AB);
259       vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscaleCDEF);
260       vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscaleGHIJ);
261       vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscaleKLMN);
262 
263       const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
264       vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
265       vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
266       vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
267       vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
268       vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
269       vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
270 
271       const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
272       vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
273       vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
274       vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
275       vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
276       vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
277       vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
278 
279       const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
280       vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
281       vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
282       vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
283       vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
284       vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
285       vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
286 
287       v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
288       v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
289       v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
290 
291       v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
292       v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
293 
294       const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
295       vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
296       voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
297 
298       wasm_v128_store(output, vout0123456789ABCDEF);
299       *((double*) (output + 16)) = wasm_f64x2_extract_lane(voutGHIJKLMNGHIJKLMN, 0);
300       output += 24;
301     }
302     if XNN_UNLIKELY(c != 0) {
303       const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
304       do {
305         v128_t vacc0123 = wasm_v128_load(w);
306         v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));
307 
308 
309         const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
310         const v128_t vk0x01234567 = wasm_i16x8_load8x8(k);
311         i0 += 8;
312 
313         v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);
314 
315 
316         const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
317         const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) (k + 24));
318         i1 += 8;
319 
320         vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi1x01234567, vk1x01234567));
321 
322         vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
323         vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
324 
325         const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
326         const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) (k + 48));
327         i2 += 8;
328 
329         vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);
330 
331 
332         const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
333         const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) (k + 72));
334         i3 += 8;
335 
336         vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi3x01234567, vk3x01234567));
337 
338         vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
339         vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
340 
341         const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
342         const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) (k + 96));
343         i4 += 8;
344 
345         vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);
346 
347 
348         const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
349         const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) (k + 120));
350         i5 += 8;
351 
352         vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi5x01234567, vk5x01234567));
353 
354         vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
355         vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
356 
357         const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
358         const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) (k + 144));
359         i6 += 8;
360 
361         vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);
362 
363 
364         const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
365         const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) (k + 168));
366         i7 += 8;
367 
368         vprod01234567 = wasm_i16x8_add(vprod01234567, wasm_i16x8_mul(vi7x01234567, vk7x01234567));
369 
370         vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
371         vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
372 
373         const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
374         const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) (k + 192));
375         i8 += 8;
376 
377         vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);
378 
379         vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
380         vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
381 
382         k += 8;
383 
384 
385       vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
386       vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
387 
388       const v128_t vscale0123 = wasm_v128_load((const float*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t)));
389       const v128_t vscale4567 = wasm_v128_load((const float*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t) + 4 * sizeof(float)));
390 
391       vacc0123 = wasm_f32x4_mul(vacc0123, vscale0123);
392       vacc4567 = wasm_f32x4_mul(vacc4567, vscale4567);
393 
394       const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
395       vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
396       vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
397 
398       const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
399       vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
400       vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
401 
402       const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
403       vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
404       vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
405 
406       v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
407       v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
408 
409       const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
410       vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
411 
412       w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
413 
414       if XNN_LIKELY(c >= 8) {
415         *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
416         output += 8;
417         c -= 8;
418       } else {
419         if (c & 4) {
420           *((float*) output) = wasm_f32x4_extract_lane(vout0123456701234567, 0);
421           vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
422           output += 4;
423         }
424         uint32_t vout0123 = wasm_i32x4_extract_lane(vout0123456701234567, 0);
425         if (c & 2) {
426           *((uint16_t*) output) = (uint16_t) vout0123;
427           vout0123 >>= 16;
428           output += 2;
429         }
430         if (c & 1) {
431           *output = (int8_t) vout0123;
432           output += 1;
433         }
434         c = 0;
435       }
436       } while (c != 0);
437     }
438 
439     output = (int8_t*) ((uintptr_t) output + output_increment);
440   } while (--output_width != 0);
441 }
442