// Auto-generated file. Do not edit!
// Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

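  // Packed weights layout (assumed here, consistent with how the lanes are used below):
  // weights[0] is the bias, weights[1..9] are the 3x3 filter taps in row-major order.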
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

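  // One pass computes 4 stride-2 output rows from 9 consecutive input rows (i0..i8).
  // i0 conceptually points one row above the input; when padding_top == 1 it is
  // redirected to the zero row below so the top padding reads zeros.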
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);
  const float* i8 = (const float*) ((uintptr_t) i7 + input_width);

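  // One output pointer per row in the group of 4 rows computed per outer iteration.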
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);
  float* o2 = (float*) ((uintptr_t) o1 + output_width);
  float* o3 = (float*) ((uintptr_t) o2 + output_width);

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
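  // Each outer iteration emits up to 4 output rows. Input rows past the padded bottom
  // are redirected to the zero row, and the corresponding output pointers are aliased
  // to the previous row; since stores run o3 -> o0, the valid row's data lands last.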
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 7) {
      i5 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 8) {
      i6 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 9) {
      i7 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 10) {
      i8 = zero;
    }

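    // vi*x1357 carry the odd-indexed pixels of the previous 8-pixel block; starting at
    // zero gives the first output column an implicit zero left neighbor (left padding).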
    v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi5x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi6x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi7x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi8x1357 = wasm_f32x4_const_splat(0.0f);

    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
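      // Seed the 4 row accumulators with the bias (lane 0 of the packed weights).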
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      i5 += 8;
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      i6 += 8;
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      i7 += 8;
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);
      i8 += 8;

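      // De-interleave each 8-pixel block into even columns (x8ACE) and odd columns
      // (x9BDF). At stride 2 the even pixels align with the center kernel column.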
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);
      const v128_t vi5x8ACE = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6);
      const v128_t vi5x9BDF = wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7);
      const v128_t vi6x8ACE = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6);
      const v128_t vi6x9BDF = wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7);
      const v128_t vi7x8ACE = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6);
      const v128_t vi7x9BDF = wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7);
      const v128_t vi8x8ACE = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6);
      const v128_t vi8x9BDF = wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7);

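      // Center kernel column: even pixels times taps weights[2], weights[5], weights[8]
      // (k01, k11, k21 under the row-major layout assumed above).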
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

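      // Build the left-neighbor vector (x7BDF) by shifting in the previous block's last
      // odd pixel, then save the current odd pixels for the next iteration.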
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      vi5x1357 = vi5x9BDF;
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      vi6x1357 = vi6x9BDF;
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      vi7x1357 = vi7x9BDF;
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);
      vi8x1357 = vi8x9BDF;

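      // Left kernel column: shifted odd pixels times taps weights[1], weights[4], weights[7].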
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

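      // Right kernel column: odd pixels times taps weights[3], weights[6], weights[9].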
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

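      // Clamp to [min, max]; this x86-flavored variant uses pmin/pmax, whose operand
      // ordering maps to single MINPS/MAXPS instructions on x86 targets.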
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      const v128_t vi5x89AB = wasm_v128_load(i5);
      const v128_t vi5xCDEF = wasm_v128_load(i5 + 4);
      const v128_t vi6x89AB = wasm_v128_load(i6);
      const v128_t vi6xCDEF = wasm_v128_load(i6 + 4);
      const v128_t vi7x89AB = wasm_v128_load(i7);
      const v128_t vi7xCDEF = wasm_v128_load(i7 + 4);
      const v128_t vi8x89AB = wasm_v128_load(i8);
      const v128_t vi8xCDEF = wasm_v128_load(i8 + 4);

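      // Same de-interleave as the main loop, but masked so that lanes past the end of
      // the row contribute zeros to the accumulators.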
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));
      const v128_t vi5x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 0, 2, 4, 6));
      const v128_t vi5x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi5x89AB, vi5xCDEF, 1, 3, 5, 7));
      const v128_t vi6x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 0, 2, 4, 6));
      const v128_t vi6x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi6x89AB, vi6xCDEF, 1, 3, 5, 7));
      const v128_t vi7x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 0, 2, 4, 6));
      const v128_t vi7x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi7x89AB, vi7xCDEF, 1, 3, 5, 7));
      const v128_t vi8x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 0, 2, 4, 6));
      const v128_t vi8x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi8x89AB, vi8xCDEF, 1, 3, 5, 7));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      const v128_t vi5x7BDF = wasm_v32x4_shuffle(vi5x1357, vi5x9BDF, 3, 4, 5, 6);
      const v128_t vi6x7BDF = wasm_v32x4_shuffle(vi6x1357, vi6x9BDF, 3, 4, 5, 6);
      const v128_t vi7x7BDF = wasm_v32x4_shuffle(vi7x1357, vi7x9BDF, 3, 4, 5, 6);
      const v128_t vi8x7BDF = wasm_v32x4_shuffle(vi8x1357, vi8x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi8x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);

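      // Store the remaining 1-4 outputs per row; after the adjustment below, the bit
      // tests on w select stores of 4, 2, and 1 pixels (extra lanes are simply not stored).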
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

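    // Rebase for the next group of 4 output rows: i8 sits 8 rows below the old i0, and
    // input_decrement undoes the column advance accumulated by the main loop.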
    i0 = (const float*) ((uintptr_t) i8 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);
    i8 = (const float*) ((uintptr_t) i7 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + output_width);
    o2 = (float*) ((uintptr_t) o1 + output_width);
    o3 = (float*) ((uintptr_t) o2 + output_width);

    output_height = doz(output_height, 4);
    padded_input_height = doz(padded_input_height, 8);
  } while (output_height != 0);
}