// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-wasmsimd-mul16.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>

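// QS8 depthwise convolution microkernel (minmax, fp32 requantization):
// a single-pass ("unipass") 9-tap kernel that processes up to 24 channels per
// main-loop iteration using WAsm SIMD 16-bit multiplies ("mul16").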
void xnn_qs8_dwconv_minmax_fp32_ukernel_up24x9__wasmsimd_mul16(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(channels != 0);
  assert(output_width != 0);

  do {
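    // Set up the 9 input-row pointers for this output pixel. Rows that point at
    // the shared zero buffer (padding) are used as-is; real rows are shifted by
    // input_offset.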
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
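    // Main loop: 24 channels per iteration. The packed weights start with
    // 24 int32 bias values, followed by the 9 kernel taps of 24 int8 values each.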
    for (; c >= 24; c -= 24) {
      v128_t vacc0123 = wasm_v128_load(w);
      v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));
      v128_t vacc89AB = wasm_v128_load((const void*) ((uintptr_t) w + 8 * sizeof(int32_t)));
      v128_t vaccCDEF = wasm_v128_load((const void*) ((uintptr_t) w + 12 * sizeof(int32_t)));
      v128_t vaccGHIJ = wasm_v128_load((const void*) ((uintptr_t) w + 16 * sizeof(int32_t)));
      v128_t vaccKLMN = wasm_v128_load((const void*) ((uintptr_t) w + 20 * sizeof(int32_t)));


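      // Tap 0: sign-extend 24 input bytes and 24 kernel bytes to int16,
      // multiply, then widen the products to int32 and accumulate. Taps 1-8
      // repeat the same pattern below.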
      const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
      const v128_t vk0x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
      const v128_t vi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
      const v128_t vk0x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
      const v128_t vi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
      const v128_t vk0xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
      i0 += 24;

      v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);
      v128_t vprod89ABCDEF = wasm_i16x8_mul(vi0x89ABCDEF, vk0x89ABCDEF);
      v128_t vprodGHIJKLMN = wasm_i16x8_mul(vi0xGHIJKLMN, vk0xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
      const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
      const v128_t vi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
      const v128_t vk1x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
      const v128_t vi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
      const v128_t vk1xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
      i1 += 24;

      vprod01234567 = wasm_i16x8_mul(vi1x01234567, vk1x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi1x89ABCDEF, vk1x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi1xGHIJKLMN, vk1xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
      const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
      const v128_t vi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
      const v128_t vk2x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
      const v128_t vi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
      const v128_t vk2xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
      i2 += 24;

      vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi2x89ABCDEF, vk2x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi2xGHIJKLMN, vk2xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
      const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
      const v128_t vi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
      const v128_t vk3x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
      const v128_t vi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
      const v128_t vk3xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
      i3 += 24;

      vprod01234567 = wasm_i16x8_mul(vi3x01234567, vk3x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi3x89ABCDEF, vk3x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi3xGHIJKLMN, vk3xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
      const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
      const v128_t vi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
      const v128_t vk4x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
      const v128_t vi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
      const v128_t vk4xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
      i4 += 24;

      vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi4x89ABCDEF, vk4x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi4xGHIJKLMN, vk4xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
      const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
      const v128_t vi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
      const v128_t vk5x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
      const v128_t vi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
      const v128_t vk5xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
      i5 += 24;

      vprod01234567 = wasm_i16x8_mul(vi5x01234567, vk5x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi5x89ABCDEF, vk5x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi5xGHIJKLMN, vk5xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
      const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
      const v128_t vi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
      const v128_t vk6x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
      const v128_t vi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
      const v128_t vk6xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
      i6 += 24;

      vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi6x89ABCDEF, vk6x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi6xGHIJKLMN, vk6xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
      const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
      const v128_t vi7x89ABCDEF = wasm_i16x8_load8x8(i7 + 8);
      const v128_t vk7x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
      const v128_t vi7xGHIJKLMN = wasm_i16x8_load8x8(i7 + 16);
      const v128_t vk7xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
      i7 += 24;

      vprod01234567 = wasm_i16x8_mul(vi7x01234567, vk7x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi7x89ABCDEF, vk7x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi7xGHIJKLMN, vk7xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));

      const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
      const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
      const v128_t vi8x89ABCDEF = wasm_i16x8_load8x8(i8 + 8);
      const v128_t vk8x89ABCDEF = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
      const v128_t vi8xGHIJKLMN = wasm_i16x8_load8x8(i8 + 16);
      const v128_t vk8xGHIJKLMN = wasm_i16x8_load8x8((const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
      i8 += 24;

      vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);
      vprod89ABCDEF = wasm_i16x8_mul(vi8x89ABCDEF, vk8x89ABCDEF);
      vprodGHIJKLMN = wasm_i16x8_mul(vi8xGHIJKLMN, vk8xGHIJKLMN);

      vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
      vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));
      vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vprod89ABCDEF));
      vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vprod89ABCDEF));
      vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vprodGHIJKLMN));
      vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vprodGHIJKLMN));


      w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));

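      // Requantize: convert the int32 accumulators to fp32 and scale, then add
      // the "magic bias" so rounding to the nearest integer happens in the
      // float representation; the integer max/sub steps below apply the lower
      // clamp and fold in the output zero point.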
      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
      vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
      vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
      vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
      vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);

      const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
      vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
      vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
      vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
      vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);

      const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
      vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
      vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
      vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
      vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);

      const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
      vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
      vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
      vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
      vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);

      const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
      vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
      vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
      vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
      vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);

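      // Narrow the 32-bit results to int16 and then to int8 with signed
      // saturation, and apply the upper clamp (the lower clamp was already
      // applied via vmagic_min above).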
      v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
      v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);

      v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
      v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);

      const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
      vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
      voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);

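      // Store all 24 int8 outputs: 16 bytes with a full vector store, then the
      // low 8 bytes of the second vector via a 64-bit lane extract.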
      wasm_v128_store(output, vout0123456789ABCDEF);
      *((double*) (output + 16)) = wasm_f64x2_extract_lane(voutGHIJKLMNGHIJKLMN, 0);
      output += 24;
    }
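    // Remainder: handle the last 1-23 channels in groups of up to 8. The 8-byte
    // loads may read past the last valid channel, which the XNN_OOB_READS
    // annotation on this kernel allows.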
    if XNN_UNLIKELY(c != 0) {
      const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
      do {
        v128_t vacc0123 = wasm_v128_load(w);
        v128_t vacc4567 = wasm_v128_load((const void*) ((uintptr_t) w + 4 * sizeof(int32_t)));


        const v128_t vi0x01234567 = wasm_i16x8_load8x8(i0);
        const v128_t vk0x01234567 = wasm_i16x8_load8x8(k);
        i0 += 8;

        v128_t vprod01234567 = wasm_i16x8_mul(vi0x01234567, vk0x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi1x01234567 = wasm_i16x8_load8x8(i1);
        const v128_t vk1x01234567 = wasm_i16x8_load8x8((const void*) (k + 24));
        i1 += 8;

        vprod01234567 = wasm_i16x8_mul(vi1x01234567, vk1x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi2x01234567 = wasm_i16x8_load8x8(i2);
        const v128_t vk2x01234567 = wasm_i16x8_load8x8((const void*) (k + 48));
        i2 += 8;

        vprod01234567 = wasm_i16x8_mul(vi2x01234567, vk2x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi3x01234567 = wasm_i16x8_load8x8(i3);
        const v128_t vk3x01234567 = wasm_i16x8_load8x8((const void*) (k + 72));
        i3 += 8;

        vprod01234567 = wasm_i16x8_mul(vi3x01234567, vk3x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi4x01234567 = wasm_i16x8_load8x8(i4);
        const v128_t vk4x01234567 = wasm_i16x8_load8x8((const void*) (k + 96));
        i4 += 8;

        vprod01234567 = wasm_i16x8_mul(vi4x01234567, vk4x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi5x01234567 = wasm_i16x8_load8x8(i5);
        const v128_t vk5x01234567 = wasm_i16x8_load8x8((const void*) (k + 120));
        i5 += 8;

        vprod01234567 = wasm_i16x8_mul(vi5x01234567, vk5x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi6x01234567 = wasm_i16x8_load8x8(i6);
        const v128_t vk6x01234567 = wasm_i16x8_load8x8((const void*) (k + 144));
        i6 += 8;

        vprod01234567 = wasm_i16x8_mul(vi6x01234567, vk6x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi7x01234567 = wasm_i16x8_load8x8(i7);
        const v128_t vk7x01234567 = wasm_i16x8_load8x8((const void*) (k + 168));
        i7 += 8;

        vprod01234567 = wasm_i16x8_mul(vi7x01234567, vk7x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        const v128_t vi8x01234567 = wasm_i16x8_load8x8(i8);
        const v128_t vk8x01234567 = wasm_i16x8_load8x8((const void*) (k + 192));
        i8 += 8;

        vprod01234567 = wasm_i16x8_mul(vi8x01234567, vk8x01234567);

        vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vprod01234567));
        vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vprod01234567));

        k += 8;


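      // Requantize this group of (up to) 8 channels exactly as in the main loop.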
      vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
      vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);

      const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
      vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
      vacc4567 = wasm_f32x4_mul(vacc4567, vscale);

      const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
      vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
      vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);

      const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
      vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
      vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);

      const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
      vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
      vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);

      v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
      v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);

      const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
      vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);

      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));

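      // Store 8 outputs when a full group remains; otherwise write the final
      // 1-7 channels in 4-, 2- and 1-byte pieces.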
      if XNN_LIKELY(c >= 8) {
        *((double*) output) = wasm_f64x2_extract_lane(vout0123456701234567, 0);
        output += 8;
        c -= 8;
      } else {
        if (c & 4) {
          *((float*) output) = wasm_f32x4_extract_lane(vout0123456701234567, 0);
          vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
          output += 4;
        }
        uint32_t vout0123 = wasm_i32x4_extract_lane(vout0123456701234567, 0);
        if (c & 2) {
          *((uint16_t*) output) = (uint16_t) vout0123;
          vout0123 >>= 16;
          output += 2;
        }
        if (c & 1) {
          *output = (int8_t) vout0123;
          output += 1;
        }
        c = 0;
      }
      } while (c != 0);
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}