// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/5x5p2-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_4x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 2);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

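  // weights[0] holds the bias and weights[1..25] hold the 5x5 kernel taps in row-major order.
  // Each value is splatted across all four lanes so it can multiply four output columns at once.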
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89AB = wasm_v128_load(weights + 8);
  const v128_t vwCDEF = wasm_v128_load(weights + 12);
  const v128_t vwGHIJ = wasm_v128_load(weights + 16);
  const v128_t vwKLMN = wasm_v128_load(weights + 20);
  const v128_t vwOP = wasm_v128_load64_splat(weights + 24);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk03 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk04 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk12 = wasm_v32x4_shuffle(vw89AB, vw89AB, 0, 0, 0, 0);
  const v128_t vk13 = wasm_v32x4_shuffle(vw89AB, vw89AB, 1, 1, 1, 1);
  const v128_t vk14 = wasm_v32x4_shuffle(vw89AB, vw89AB, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw89AB, vw89AB, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 1, 1, 1, 1);
  const v128_t vk23 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 2, 2, 2, 2);
  const v128_t vk24 = wasm_v32x4_shuffle(vwCDEF, vwCDEF, 3, 3, 3, 3);
  const v128_t vk30 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 0, 0, 0, 0);
  const v128_t vk31 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 1, 1, 1, 1);
  const v128_t vk32 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 2, 2, 2, 2);
  const v128_t vk33 = wasm_v32x4_shuffle(vwGHIJ, vwGHIJ, 3, 3, 3, 3);
  const v128_t vk34 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 0, 0, 0, 0);
  const v128_t vk40 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 1, 1, 1, 1);
  const v128_t vk41 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 2, 2, 2, 2);
  const v128_t vk42 = wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3);
  const v128_t vk43 = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);
  const v128_t vk44 = wasm_v32x4_shuffle(vwOP, vwOP, 1, 1, 1, 1);

  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

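  // padding_top == 2, so the first two kernel rows of the topmost outputs read from the zero
  // vector; i2 points at the first real input row.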
  const float* i0 = zero;
  const float* i1 = zero;
  const float* i2 = input;
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_width);
  const float* i7 = (const float*) ((uintptr_t) i6 + input_width);

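  // Four output rows are computed per iteration of the outer loop.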
  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);

  size_t output_height = input_height;
  do {
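    // Near the bottom boundary, redirect out-of-range input rows to the zero vector and alias
    // the corresponding output row pointers so the extra stores stay in bounds.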
    if XNN_UNPREDICTABLE(output_height < 2) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i4 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i5 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i6 = zero;
    }
    if XNN_UNPREDICTABLE(output_height < 6) {
      i7 = zero;
    }

    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi6x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi7x0123 = wasm_f32x4_const_splat(0.0f);

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;
    v128_t vi6x4567 = wasm_v128_load(i6); i6 += 4;
    v128_t vi7x4567 = wasm_v128_load(i7); i7 += 4;

    size_t w = input_width;
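    // Main loop: compute 4 output columns per row per iteration while more than 8 input columns
    // remain; the last 5..8 columns are handled below with masked loads.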
    for (; w > 8 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
      const v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
      const v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      vi6x0123 = vi6x4567;
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
      vi7x0123 = vi7x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
      vi6x4567 = vi6x89AB;
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
      vi7x4567 = vi7x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

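      // Merge the second accumulator into the first (the _acc2 variant splits the 25
      // multiply-adds across two chains to shorten the dependency path).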
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);

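      // Clamp to [min, max]; pmin/pmax are used because they lower to single minps/maxps
      // instructions on x86 (hence the _x86_ kernel name).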
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 5..8 pixels.
    if XNN_LIKELY(w > 4 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;
      v128_t vi6x89AB = wasm_v128_load(i6); i6 += 4;
      v128_t vi7x89AB = wasm_v128_load(i7); i7 += 4;

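      // Mask the columns past the end of the row so out-of-bounds lanes contribute zero.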
      vi0x89AB = wasm_v128_and(vmask, vi0x89AB);
      vi1x89AB = wasm_v128_and(vmask, vi1x89AB);
      vi2x89AB = wasm_v128_and(vmask, vi2x89AB);
      vi3x89AB = wasm_v128_and(vmask, vi3x89AB);
      vi4x89AB = wasm_v128_and(vmask, vi4x89AB);
      vi5x89AB = wasm_v128_and(vmask, vi5x89AB);
      vi6x89AB = wasm_v128_and(vmask, vi6x89AB);
      vi7x89AB = wasm_v128_and(vmask, vi7x89AB);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      vi0x0123 = vi0x4567;
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      vi1x0123 = vi1x4567;
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      vi2x0123 = vi2x4567;
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      vi3x0123 = vi3x4567;
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      vi4x0123 = vi4x4567;
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      vi5x0123 = vi5x4567;
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      vi6x0123 = vi6x4567;
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);
      vi7x0123 = vi7x4567;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 2, 3, 4, 5);
      vi0x4567 = vi0x89AB;
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 2, 3, 4, 5);
      vi1x4567 = vi1x89AB;
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 2, 3, 4, 5);
      vi2x4567 = vi2x89AB;
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 2, 3, 4, 5);
      vi3x4567 = vi3x89AB;
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 2, 3, 4, 5);
      vi4x4567 = vi4x89AB;
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 2, 3, 4, 5);
      vi5x4567 = vi5x89AB;
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x4567, vi6x89AB, 2, 3, 4, 5);
      vi6x4567 = vi6x89AB;
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x4567, vi7x89AB, 2, 3, 4, 5);
      vi7x4567 = vi7x89AB;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;

      w -= 4 * sizeof(float);
    }
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
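    // Final block of 1..4 pixels: the remaining columns are already held in the vi*x4567
    // registers; mask them and shift in zeros for the columns beyond the row end.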
    {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;
      v128_t vo2p0 = vbias;
      v128_t vo3p0 = vbias;

      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);
      vi6x4567 = wasm_v128_and(vmask, vi6x4567);
      vi7x4567 = wasm_v128_and(vmask, vi7x4567);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x4567, vk02);
      v128_t vo1p1 = wasm_f32x4_mul(vi1x4567, vk02);
      v128_t vo2p1 = wasm_f32x4_mul(vi2x4567, vk02);
      v128_t vo3p1 = wasm_f32x4_mul(vi3x4567, vk02);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, vk12));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, vk12));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, vk22));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, vk22));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, vk22));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x4567, vk32));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x4567, vk32));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x4567, vk32));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x4567, vk32));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x4567, vk42));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x4567, vk42));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x4567, vk42));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x4567, vk42));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);
      const v128_t vi6x3456 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 3, 4, 5, 6);
      const v128_t vi7x3456 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x3456, vk01));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x3456, vk01));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x3456, vk01));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x3456, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, vk11));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, vk11));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, vk11));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x3456, vk21));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x3456, vk21));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x3456, vk21));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x3456, vk21));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x3456, vk31));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x3456, vk31));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x3456, vk31));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x3456, vk31));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x3456, vk41));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x3456, vk41));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x3456, vk41));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x3456, vk41));

      const v128_t vi0x2345 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 2, 3, 4, 5);
      const v128_t vi1x2345 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 2, 3, 4, 5);
      const v128_t vi2x2345 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 2, 3, 4, 5);
      const v128_t vi3x2345 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 2, 3, 4, 5);
      const v128_t vi4x2345 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 2, 3, 4, 5);
      const v128_t vi5x2345 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 2, 3, 4, 5);
      const v128_t vi6x2345 = wasm_v32x4_shuffle(vi6x0123, vi6x4567, 2, 3, 4, 5);
      const v128_t vi7x2345 = wasm_v32x4_shuffle(vi7x0123, vi7x4567, 2, 3, 4, 5);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x2345, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x2345, vk00));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x2345, vk00));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x2345, vk00));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x2345, vk10));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x2345, vk10));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x2345, vk10));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x2345, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x2345, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x2345, vk20));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x2345, vk20));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x2345, vk20));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x2345, vk30));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x2345, vk30));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x2345, vk30));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x2345, vk30));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x2345, vk40));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x2345, vk40));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x2345, vk40));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x2345, vk40));

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);
      const v128_t vi6x5678 = wasm_v32x4_shuffle(vi6x4567, vzero, 1, 2, 3, 4);
      const v128_t vi7x5678 = wasm_v32x4_shuffle(vi7x4567, vzero, 1, 2, 3, 4);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x5678, vk03));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi1x5678, vk03));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi2x5678, vk03));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi3x5678, vk03));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, vk13));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, vk13));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, vk13));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, vk13));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x5678, vk23));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi3x5678, vk23));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi4x5678, vk23));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi5x5678, vk23));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi3x5678, vk33));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x5678, vk33));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi5x5678, vk33));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi6x5678, vk33));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi4x5678, vk43));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi5x5678, vk43));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi6x5678, vk43));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi7x5678, vk43));

      const v128_t vi0x6789 = wasm_v32x4_shuffle(vi0x5678, vzero, 1, 2, 3, 4);
      const v128_t vi1x6789 = wasm_v32x4_shuffle(vi1x5678, vzero, 1, 2, 3, 4);
      const v128_t vi2x6789 = wasm_v32x4_shuffle(vi2x5678, vzero, 1, 2, 3, 4);
      const v128_t vi3x6789 = wasm_v32x4_shuffle(vi3x5678, vzero, 1, 2, 3, 4);
      const v128_t vi4x6789 = wasm_v32x4_shuffle(vi4x5678, vzero, 1, 2, 3, 4);
      const v128_t vi5x6789 = wasm_v32x4_shuffle(vi5x5678, vzero, 1, 2, 3, 4);
      const v128_t vi6x6789 = wasm_v32x4_shuffle(vi6x5678, vzero, 1, 2, 3, 4);
      const v128_t vi7x6789 = wasm_v32x4_shuffle(vi7x5678, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x6789, vk04));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x6789, vk04));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x6789, vk04));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x6789, vk04));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x6789, vk14));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi2x6789, vk14));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi3x6789, vk14));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi4x6789, vk14));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x6789, vk24));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x6789, vk24));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x6789, vk24));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x6789, vk24));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi3x6789, vk34));
      vo1p1 = wasm_f32x4_add(vo1p1, wasm_f32x4_mul(vi4x6789, vk34));
      vo2p1 = wasm_f32x4_add(vo2p1, wasm_f32x4_mul(vi5x6789, vk34));
      vo3p1 = wasm_f32x4_add(vo3p1, wasm_f32x4_mul(vi6x6789, vk34));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi4x6789, vk44));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi5x6789, vk44));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi6x6789, vk44));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi7x6789, vk44));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);
      vo1p0 = wasm_f32x4_add(vo1p0, vo1p1);
      vo2p0 = wasm_f32x4_add(vo2p0, vo2p1);
      vo3p0 = wasm_f32x4_add(vo3p0, vo3p1);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      v128_t vo2 = wasm_f32x4_pmax(vmin, vo2p0);
      v128_t vo3 = wasm_f32x4_pmax(vmin, vo3p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);
      vo2 = wasm_f32x4_pmin(vmax, vo2);
      vo3 = wasm_f32x4_pmin(vmax, vo3);

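      // Store the remaining 1..4 outputs per row: a full vector if 4 remain, otherwise 2 and/or 1
      // elements, shifting the already-stored lanes out of the vector between partial stores.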
      if XNN_LIKELY(w & (4 * sizeof(float))) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

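    // Rotate the row pointers for the next group of 4 output rows: input rows 4 and 5 of this
    // iteration become rows 0 and 1 of the next, rewound by input_decrement to undo the
    // within-row advance.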
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);
    i6 = (const float*) ((uintptr_t) i5 + input_width);
    i7 = (const float*) ((uintptr_t) i6 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);

    output_height = doz(output_height, 4);
  } while (output_height != 0);
}
