// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-loadsplat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

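// A 3x3 depthwise convolution in CHW layout with stride 2 and padding 1
// ("3x3s2p1"). Each outer-loop iteration produces 2 output rows; the inner
// loop produces 4 output pixels per row from 8 consecutive input pixels.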
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_2x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

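  // The even/odd masks zero out lanes that fall past the end of the row in
  // the final partial block; vmin/vmax hold the output clamping bounds.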
  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

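  // weights[0] holds the bias; weights[1..9] hold the 3x3 kernel taps in
  // row-major order. Every tap is broadcast ("loadsplat") to all four lanes.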
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);
  const v128_t vbias = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);
  const v128_t vk00 = wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1);
  const v128_t vk01 = wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2);
  const v128_t vk02 = wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3);
  const v128_t vk10 = wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0);
  const v128_t vk11 = wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1);
  const v128_t vk12 = wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2);
  const v128_t vk20 = wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3);
  const v128_t vk21 = wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0);
  const v128_t vk22 = wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1);

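  // input_width is measured in bytes. The main loop consumes 8 input floats
  // (32 bytes) per row per iteration, so input_decrement is the total byte
  // advance of the row pointers before the remainder block.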
  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));
  const size_t output_width = round_down_po2((input_width + (2 /* padding */ - 3 /* kernel size */ + 2 /* subsampling */) * sizeof(float)) / 2, sizeof(float));

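  // i0..i4 are the 5 input rows needed for 2 stride-2 output rows of a
  // 3-tall kernel. With padding_top == 1, i0 is redirected to the zero row.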
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_width);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_width);

  float* o0 = output;
  float* o1 = (float*) ((uintptr_t) o0 + output_width);

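  // output_height = floor((padded_input_height - 3) / 2) + 1, i.e. the usual
  // convolution output size for kernel 3 and stride 2, written with the +1
  // folded into the numerator.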
  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 5) {
      i3 = zero;
      o1 = o0;
    }
    if XNN_UNPREDICTABLE(padded_input_height < 6) {
      i4 = zero;
    }

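    // vi*x1357 carries the odd-indexed pixels of the previous block. It
    // starts at zero so the first output column sees the implicit left
    // padding pixel.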
    v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi3x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi4x1357 = wasm_f32x4_const_splat(0.0f);

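    // Main loop: 8 input columns in, 4 output columns out, per row.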
    size_t w = input_width;
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      i3 += 8;
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);
      i4 += 8;

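      // Deinterleave each row into even-indexed (x8ACE) and odd-indexed
      // (x9BDF) pixels; with stride 2, the even pixels sit under the center
      // kernel column.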
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);
      const v128_t vi3x8ACE = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6);
      const v128_t vi3x9BDF = wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7);
      const v128_t vi4x8ACE = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6);
      const v128_t vi4x9BDF = wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7);

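      // Center kernel column: rows 0-2 feed output row 0, rows 2-4 feed
      // output row 1 (row 2 is shared).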
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));

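      // Shift the previous block's last odd pixel into lane 0 to form the
      // left neighbors (x7BDF) for the leftmost kernel column.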
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      vi3x1357 = vi3x9BDF;
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);
      vi4x1357 = vi4x9BDF;

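      // Left kernel column.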
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));

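      // Right kernel column: the odd-indexed pixels are the right neighbors.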
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));

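      // Clamp to [min, max]. pmin/pmax (rather than min/max) lower to single
      // SSE instructions on x86, which is why this variant carries the _x86_
      // tag in its name.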
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);

      wasm_v128_store(o1, vo1); o1 += 4;
      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = vbias;
      v128_t vo1p0 = vbias;

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      const v128_t vi3x89AB = wasm_v128_load(i3);
      const v128_t vi3xCDEF = wasm_v128_load(i3 + 4);
      const v128_t vi4x89AB = wasm_v128_load(i4);
      const v128_t vi4xCDEF = wasm_v128_load(i4 + 4);

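      // Zero the lanes that fall past the end of the row. The loads above may
      // read out of bounds, which the XNN_OOB_READS annotation permits.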
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));
      const v128_t vi3x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 0, 2, 4, 6));
      const v128_t vi3x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi3x89AB, vi3xCDEF, 1, 3, 5, 7));
      const v128_t vi4x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 0, 2, 4, 6));
      const v128_t vi4x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi4x89AB, vi4xCDEF, 1, 3, 5, 7));

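      // Same accumulation as the main loop, applied to the masked vectors.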
      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x8ACE, vk01));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x8ACE, vk01));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, vk11));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x8ACE, vk11));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, vk21));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x8ACE, vk21));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      const v128_t vi3x7BDF = wasm_v32x4_shuffle(vi3x1357, vi3x9BDF, 3, 4, 5, 6);
      const v128_t vi4x7BDF = wasm_v32x4_shuffle(vi4x1357, vi4x9BDF, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x7BDF, vk00));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x7BDF, vk00));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, vk10));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x7BDF, vk10));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x7BDF, vk20));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x7BDF, vk20));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, vk02));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi2x9BDF, vk02));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x9BDF, vk12));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi3x9BDF, vk12));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, vk22));
      vo1p0 = wasm_f32x4_add(vo1p0, wasm_f32x4_mul(vi4x9BDF, vk22));

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      v128_t vo1 = wasm_f32x4_pmax(vmin, vo1p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);
      vo1 = wasm_f32x4_pmin(vmax, vo1);

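      // w bytes of input remain (1-7 floats). After adding one float,
      // w / (2 * sizeof(float)) equals the number of output pixels, and the
      // bit tests below store 4, 2, and 1 pixels of that count.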
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o1, vo1); o1 += 4;
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o1) = wasm_f64x2_extract_lane(vo1, 0); o1 += 2;
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
          vo1 = wasm_v32x4_shuffle(vo1, vo1, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o1 = wasm_f32x4_extract_lane(vo1, 0); o1 += 1;
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

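    // Rebase for the next row pair: i4 - input_decrement is the start of the
    // row i4 began on, i.e. 4 input rows below the previous i0 (2 output rows
    // times stride 2).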
    i0 = (const float*) ((uintptr_t) i4 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);
    i3 = (const float*) ((uintptr_t) i2 + input_width);
    i4 = (const float*) ((uintptr_t) i3 + input_width);

    o0 = o1;
    o1 = (float*) ((uintptr_t) o0 + output_width);

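    // doz() is a saturating ("decrement or zero") subtraction: advance two
    // output rows and four padded input rows, stopping at zero.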
    output_height = doz(output_height, 2);
    padded_input_height = doz(padded_input_height, 4);
  } while (output_height != 0);
}