// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_6x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

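  // params provides the tail mask (zeroes lanes past the row end) and the output clamping bounds.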
  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

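  // Weights are packed {bias, k00, k01, k02, k10, k11, k12, k20, k21, k22}:
  // vw0123 = {bias, k00, k01, k02}, vw4567 = {k10, k11, k12, k20}, vw89 = {k21, k22, k21, k22}.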
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

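  // After a row is fully processed its pointer has advanced by input_width rounded up
  // to a whole 4-float block; input_decrement rewinds exactly that overshoot.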
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

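  // Computing 6 output rows of a 3x3 convolution needs 8 input rows (6 plus a 2-row
  // halo); i0 points at the zero row to provide the implicit top padding row.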
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);

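  // CHW layout: consecutive output rows are input_width bytes apart.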
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);
  float* o4 = (float*) ((uintptr_t) o3 + input_width);
  float* o5 = (float*) ((uintptr_t) o4 + input_width);

  size_t output_height = input_height;
  do {
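    // If fewer than 6 output rows remain, alias the surplus input rows to the zero row
    // and each surplus output pointer to the previous one; stores are later issued from
    // o5 down to o0, so the valid row is written last and wins.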
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
      o4 = o3;
    }
    if XNN_UNPREDICTABLE(output_height < 6) {
      i6 = zero;
      o5 = o4;
    }
    if XNN_UNPREDICTABLE(output_height < 7) {
      i7 = zero;
    }

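    // The implicit left padding column starts out as zeros in the x0123 registers.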
    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;

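    // Main loop: each iteration yields 4 output columns for all 6 rows. Every 3x3 tap
    // is applied as a splat-multiply-accumulate; the neighbor columns are formed by
    // shuffling adjacent 4-element blocks.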
    size_t w = input_width;
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
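      // Every accumulator starts as the bias (weights[0]) splatted across the lanes.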
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

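      // x3456 supplies each output column's left neighbor (taps k00, k10, k20).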
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;
      vi3x0123 = vi3x4567;
      vi4x0123 = vi4x4567;
      vi5x0123 = vi5x4567;
      vi6x0123 = vi6x4567;
      vi7x0123 = vi7x4567;

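      // x5678 supplies each output column's right neighbor, pulled in from the next block.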
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;
      vi4x4567 = vi4x89AB;
      vi5x4567 = vi5x89AB;
      vi6x4567 = vi6x89AB;
      vi7x4567 = vi7x89AB;

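      // Clamp to [min, max]; presumably the pmin/pmax intrinsics with the limit as the
      // first operand are what make this the x86-tuned variant, since they lower well
      // to single MINPS/MAXPS instructions.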
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
      v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);
      vo4 = wasm_f32x4_pmin(vmax, vo4);
      vo5 = wasm_f32x4_pmin(vmax, vo5);

      wasm_v128_store(o5, vo5); o5 += 4;
      wasm_v128_store(o4, vo4); o4 += 4;
      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo4p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo5p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

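      // Mask off lanes past the end of the row: the kernel is annotated XNN_OOB_READS,
      // and the mask keeps the over-read lanes out of the sums.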
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
      vi7x4567 = wasm_v128_and(vmask, vi7x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

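      // There is no next block: shift zeros in for the implicit right padding column.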
      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi6x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo5p0 = wasm_f32x4_add(vo5p0, wasm_f32x4_mul(vi7x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      v128_t vo4 = wasm_f32x4_pmax(vmin, vo4p0);
      v128_t vo5 = wasm_f32x4_pmax(vmin, vo5p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);
      vo4 = wasm_f32x4_pmin(vmax, vo4);
      vo5 = wasm_f32x4_pmin(vmax, vo5);

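      // Store the remaining 1..4 columns as a full 4-element store, or as a 2- and/or
      // 1-element tail.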
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o5, vo5); o5 += 4;
        wasm_v128_store(o4, vo4); o4 += 4;
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
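        // Two-column tail: write the low two lanes as one 8-byte store, then rotate
        // the upper lanes down for a possible 1-element store.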
        if (w & (2 * sizeof(float))) {
          *((double*) o5) = wasm_f64x2_extract_lane(vo5, 0); o5 += 2;
          *((double*) o4) = wasm_f64x2_extract_lane(vo4, 0); o4 += 2;
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
          vo4 = wasm_v32x4_shuffle(vo4, vo4, 2, 3, 0, 1);
          vo5 = wasm_v32x4_shuffle(vo5, vo5, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o5 = wasm_f32x4_extract_lane(vo5, 0); o5 += 1;
          *o4 = wasm_f32x4_extract_lane(vo4, 0); o4 += 1;
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

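    // Advance six rows: the next group's i0 sits one row above its first output row,
    // which is the row i6 pointed at; input_decrement undoes this group's overshoot.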
    i0 = (const float*) ((uintptr_t) i6 - input_decrement);
    i1 = (const float*) ((uintptr_t) i7 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);

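    // The next group of six output rows starts where o5 left off.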
    o0 = o5;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);
    o4 = (float*) ((uintptr_t) o3 + input_width);
    o5 = (float*) ((uintptr_t) o4 + input_width);

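    // doz() is a saturating difference-or-zero subtraction, so the count never underflows.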
    output_height = doz(output_height, 6);
  } while (output_height != 0);
}