// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_4x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

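  // Lane mapping of the packed weights to the bias b and the 3x3 kernel k[row][col]:
  //   vw0123 = { b,       k[0][0], k[0][1], k[0][2] }
  //   vw4567 = { k[1][0], k[1][1], k[1][2], k[2][0] }
  //   vw89   = { k[2][1], k[2][2], k[2][1], k[2][2] }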
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

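  // input_decrement is the number of bytes each input row pointer advances during one pass
  // (the row width rounded up to a whole block of 4 floats); subtracting it rewinds a pointer
  // to the start of its row. Each pass reads six input rows (i0..i5) and writes four output
  // rows (o0..o3).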
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + input_width);
  float* o2 = (float*) ((uintptr_t) o1 + input_width);
  float* o3 = (float*) ((uintptr_t) o2 + input_width);

  size_t output_height = input_height;
  do {
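    // Near the bottom of the image fewer rows remain: out-of-range input rows read from the
    // zero buffer, and surplus output pointers alias lower-numbered rows. Stores are issued in
    // o3..o0 order, so the valid result is always written last.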
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(output_height < 3) {
      i3 = zero;
      o2 = o1;
    }
    if XNN_UNPREDICTABLE(output_height < 4) {
      i4 = zero;
      o3 = o2;
    }
    if XNN_UNPREDICTABLE(output_height < 5) {
      i5 = zero;
    }

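    // The x0123 vectors hold the four columns to the left of the current block; they start at
    // zero to implement the implicit left padding of one pixel.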
    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi5x0123 = wasm_f32x4_const_splat(0.0f);

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;
    v128_t vi3x4567 = wasm_v128_load(i3); i3 += 4;
    v128_t vi4x4567 = wasm_v128_load(i4); i4 += 4;
    v128_t vi5x4567 = wasm_v128_load(i5); i5 += 4;

    size_t w = input_width;
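    // Main loop: produce 4 output columns per iteration while more than one full block remains;
    // the final 1..4 columns are handled by the tail block below. Each accumulator starts from
    // the bias in lane 0 of vw0123.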
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;
      const v128_t vi3x89AB = wasm_v128_load(i3); i3 += 4;
      const v128_t vi4x89AB = wasm_v128_load(i4); i4 += 4;
      const v128_t vi5x89AB = wasm_v128_load(i5); i5 += 4;

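      // Center kernel column (k01, k11, k21): output row r accumulates input rows r, r+1, r+2.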
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

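      // The x3456 vectors hold the column to the left of each output pixel; apply the left
      // kernel column (k00, k10, k20).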
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;
      vi3x0123 = vi3x4567;
      vi4x0123 = vi4x4567;
      vi5x0123 = vi5x4567;

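      // The x5678 vectors hold the column to the right of each output pixel; apply the right
      // kernel column (k02, k12, k22).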
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vi3x89AB, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vi4x89AB, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vi5x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;
      vi3x4567 = vi3x89AB;
      vi4x4567 = vi4x89AB;
      vi5x4567 = vi5x89AB;

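      // Clamp the four output rows to the [min, max] range from params.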
      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

      wasm_v128_store(o3, vo3); o3 += 4;
      wasm_v128_store(o2, vo2); o2 += 4;
      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo2p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo3p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

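      // Zero the lanes beyond the end of the row (selected by params->scalar.mask) so they do
      // not contribute to the sums.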
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);
      vi3x4567 = wasm_v128_and(vmask, vi3x4567);
      vi4x4567 = wasm_v128_and(vmask, vi4x4567);
      vi5x4567 = wasm_v128_and(vmask, vi5x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);
      const v128_t vi3x3456 = wasm_v32x4_shuffle(vi3x0123, vi3x4567, 3, 4, 5, 6);
      const v128_t vi4x3456 = wasm_v32x4_shuffle(vi4x0123, vi4x4567, 3, 4, 5, 6);
      const v128_t vi5x3456 = wasm_v32x4_shuffle(vi5x0123, vi5x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

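      // No further block follows in the tail, so shift in zeros for the column to the right.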
      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);
      const v128_t vi3x5678 = wasm_v32x4_shuffle(vi3x4567, vzero, 1, 2, 3, 4);
      const v128_t vi4x5678 = wasm_v32x4_shuffle(vi4x4567, vzero, 1, 2, 3, 4);
      const v128_t vi5x5678 = wasm_v32x4_shuffle(vi5x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo2p0 = wasm_f32x4_add(vo2p0, wasm_f32x4_mul(vi4x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo3p0 = wasm_f32x4_add(vo3p0, wasm_f32x4_mul(vi5x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_max(vo0p0, vmin);
      v128_t vo1 = wasm_f32x4_max(vo1p0, vmin);
      v128_t vo2 = wasm_f32x4_max(vo2p0, vmin);
      v128_t vo3 = wasm_f32x4_max(vo3p0, vmin);
      vo0 = wasm_f32x4_min(vo0, vmax);
      vo1 = wasm_f32x4_min(vo1, vmax);
      vo2 = wasm_f32x4_min(vo2, vmax);
      vo3 = wasm_f32x4_min(vo3, vmax);

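      // Store 4 outputs per row for a full block; otherwise store 2 and/or 1 outputs, shifting
      // the remaining lanes down between the partial stores.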
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o3, vo3); o3 += 4;
        wasm_v128_store(o2, vo2); o2 += 4;
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o3) = wasm_f64x2_extract_lane(vo3, 0); o3 += 2;
          *((double*) o2) = wasm_f64x2_extract_lane(vo2, 0); o2 += 2;
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
          vo2 = wasm_v32x4_shuffle(vo2, vo2, 2, 3, 0, 1);
          vo3 = wasm_v32x4_shuffle(vo3, vo3, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o3 = wasm_f32x4_extract_lane(vo3, 0); o3 += 1;
          *o2 = wasm_f32x4_extract_lane(vo2, 0); o2 += 1;
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

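    // Set up the next group of 4 output rows: rewind i4/i5 to the start of their rows and reuse
    // them as the new i0/i1; o3 already points at the row after the last one written.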
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i5 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);
    i5 = (const float*) ((uintptr_t) i4 + input_width);

    o0 = o3;
    o1 = (float*) ((uintptr_t) o0 + input_width);
    o2 = (float*) ((uintptr_t) o1 + input_width);
    o3 = (float*) ((uintptr_t) o2 + input_width);

    output_height = doz(output_height, 4);
  } while (output_height != 0);
}