// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_1x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

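  // weights[0] is the bias; weights[1..25] are the 5x5 filter taps in row-major order.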
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v128_load64_splat(weights + 24);

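  // Bytes to step back at the end of each row: input_width rounded up to a whole vector of 4 floats.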
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

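  // padding_top == 2: the two rows above the image are read from the zero buffer.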
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;

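  // 1x4 kernel: one output row per outer iteration, four output pixels per inner step.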
  size_t output_height = input_height;
  do {
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
    }

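    // Columns to the left of the current block; zeros provide the implicit left padding.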
    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);

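    // Load the first four columns of each of the five input rows.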
    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;

    size_t w = input_width;
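    // Main loop: compute 4 output pixels per iteration while more than 8 input pixels remain.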
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));


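      // Clamp the accumulated dot products to the [min, max] output range.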
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;

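      // Zero the out-of-bounds lanes of the loaded block.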
      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
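    // Process the final block of 1..4 pixels.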
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2)));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x2345, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x2345, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1)));

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0)));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x6789, wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x6789, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

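      // Store 4, 2, and/or 1 pixels depending on the remaining output width.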
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

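    // Slide the input window down one row, rewinding each pointer to the start of its row.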
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);


  } while (--output_height != 0);
}