// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

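// 3x3 depthwise convolution with stride 2 and padding 1 for f32 data in CHW
// layout, computing up to 2 output rows x 4 output pixels per iteration.
// Weights are applied by splatting individual lanes with shuffles, and the
// "x86" variant clamps with wasm_f32x4_pmin/pmax (see the note at the clamp).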
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_2x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

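  // Load the even/odd-column masks used to zero out-of-bounds lanes in the
  // final partial block, plus the output clamping bounds.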
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

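  // Weight layout: weights[0] is the bias, weights[1..9] are the 3x3 filter
  // taps in row-major order; vw89 holds weights[8..9] in both 64-bit halves.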
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

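  // input_decrement is the number of bytes each row pointer advances in the
  // vectorized loop (input width rounded down to a whole 8-column block); it
  // is used below to re-base the row pointers. output_width is the byte size
  // of one output row under stride-2, padding-1 downsampling.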
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

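  // i0..i4 walk five consecutive input rows: enough for a 3x3 window over two
  // stride-2 output rows. When the top padding row is needed, i0 is redirected
  // to the zero row.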
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);

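  // Each outer iteration produces up to 2 output rows from 5 input rows.
  // Input rows that fall below the (bottom-padded) image are redirected to the
  // zero row, and o1 aliases o0 when only one output row remains.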
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }

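    // vi*x1357 carry the odd-indexed columns of the previous 8-column block;
    // they start at zero to supply the implicit left-padding column.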
    v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);

    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
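      // Initialize both row accumulators with the bias (weights[0]).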
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;

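      // De-interleave 8 consecutive columns into even (x8ACE) and odd (x9BDF)
      // halves; the even columns are the stride-2 window centers.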
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

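      // Shift in the last odd column of the previous block to form vi*x7BDF,
      // the column immediately left of each window center.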
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

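      // Clamp the accumulators to [min, max]. The pmin/pmax forms lower to a
      // single minps/maxps on x86, which is the apparent reason this
      // x86-tuned variant uses them instead of wasm_f32x4_min/max.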
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
      v128_t vo1p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);

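      // Zero out columns past the end of the row so the over-read lanes
      // (permitted by XNN_OOB_READS) do not contribute to the output.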
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);

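      // w holds the remaining input bytes (1-7 floats); adding one float makes
      // the bit tests below select the output-pixel count: 4, then 2, then 1.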
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

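    // Advance to the next pair of output rows: the new i0 is the start of the
    // row i4 was traversing (4 input rows down), re-based via input_decrement.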
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + output_width);

    output_height = doz(output_height, 2);
    padded_input_height = doz(padded_input_height, 4);
  } while (output_height != 0);
}