// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3s2p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>

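// 3x3 depthwise convolution with stride 2 and padding 1 over CHW data.
// Per the kernel name: one output row of up to 4 pixels per iteration
// ("1x4"), two partial accumulators ("acc2"), filter taps broadcast via
// splat-style shuffles, and pmin/pmax clamping (the variant preferred on
// x86 WAsm engines).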
void xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top >= 0);
  assert(padding_top <= 1);

  const v128_t vmask_even = wasm_v128_load(params->scalar.mask_even);
  const v128_t vmask_odd  = wasm_v128_load(params->scalar.mask_odd);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

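  // Weights layout (inferred from the tap lanes selected below): weights[0]
  // is the bias, weights[1..9] are the 3x3 kernel taps in row-major order.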
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

  const size_t input_decrement = round_down_po2(input_width, 4 /* SIMD output width */ * 2 /* subsampling */ * sizeof(float));

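  // i0..i2 are the three input rows feeding one output row. With
  // padding_top == 1, ((-padding_top) & input_width) equals input_width, so
  // i0 initially points one row above the input and is then redirected to
  // the zero buffer.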
  const float* i0 = (const float*) ((uintptr_t) input - ((-padding_top) & input_width));
  const float* i1 = (const float*) ((uintptr_t) i0 + input_width);
  if XNN_UNPREDICTABLE(padding_top != 0) {
    i0 = zero;
  }
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

  size_t padded_input_height = input_height + padding_top + 1 /* padding bottom */;
  size_t output_height = (padded_input_height - 3 /* kernel size */ + 2 /* subsampling */) / 2;
  do {
    if XNN_UNPREDICTABLE(padded_input_height < 4) {
      i2 = zero;
    }

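    // vi*x1357 carry the odd-indexed pixels of the previous block; starting
    // at zero provides the implicit left padding for the first block.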
    v128_t vi0x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x1357 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x1357 = wasm_f32x4_const_splat(0.0f);

    size_t w = input_width;
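    // Main loop: consume 8 input pixels per row and produce 4 output pixels
    // per iteration. vo0p0 starts from the bias (lane 0 of vw0123).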
    for (; w >= 8 * sizeof(float); w -= 8 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      i0 += 8;
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      i1 += 8;
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);
      i2 += 8;

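      // De-interleave each row into even-indexed (8ACE) and odd-indexed
      // (9BDF) columns; with stride 2, outputs are centered on every other
      // input column.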
      const v128_t vi0x8ACE = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6);
      const v128_t vi0x9BDF = wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7);
      const v128_t vi1x8ACE = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6);
      const v128_t vi1x9BDF = wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7);
      const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);
      const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

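      // Shift in the last odd pixel of the previous block to form the
      // left-neighbor (7BDF) vectors, then save the current odd pixels for
      // the next iteration.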
      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      vi0x1357 = vi0x9BDF;
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      vi1x1357 = vi1x9BDF;
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);
      vi2x1357 = vi2x9BDF;

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

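      // Merge the two partial accumulators ("acc2"), then clamp to
      // [min, max] with pmax/pmin, the form this x86-tuned variant is
      // named for.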
      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Last block has 0-7 pixels to process.
    assert(w < 8 * sizeof(float));
    if XNN_LIKELY(w != 0) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0);
      const v128_t vi0xCDEF = wasm_v128_load(i0 + 4);
      const v128_t vi1x89AB = wasm_v128_load(i1);
      const v128_t vi1xCDEF = wasm_v128_load(i1 + 4);
      const v128_t vi2x89AB = wasm_v128_load(i2);
      const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);

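      // Same de-interleave as the main loop, but mask away lanes that lie
      // past the end of the row; the loads above may read out of bounds,
      // which the XNN_OOB_READS annotation on this kernel permits.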
      const v128_t vi0x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 0, 2, 4, 6));
      const v128_t vi0x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi0x89AB, vi0xCDEF, 1, 3, 5, 7));
      const v128_t vi1x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 0, 2, 4, 6));
      const v128_t vi1x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi1x89AB, vi1xCDEF, 1, 3, 5, 7));
      const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6));
      const v128_t vi2x9BDF = wasm_v128_and(vmask_odd,  wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7));

      v128_t vo0p1 = wasm_f32x4_mul(vi0x8ACE, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x8ACE, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x8ACE, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x7BDF = wasm_v32x4_shuffle(vi0x1357, vi0x9BDF, 3, 4, 5, 6);
      const v128_t vi1x7BDF = wasm_v32x4_shuffle(vi1x1357, vi1x9BDF, 3, 4, 5, 6);
      const v128_t vi2x7BDF = wasm_v32x4_shuffle(vi2x1357, vi2x9BDF, 3, 4, 5, 6);

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi0x7BDF, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi2x7BDF, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x9BDF, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p1 = wasm_f32x4_add(vo0p1, wasm_f32x4_mul(vi1x9BDF, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x9BDF, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, vo0p1);

      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

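      // n = w / sizeof(float) input pixels remain (1-7), yielding
      // (n + 1) / 2 output pixels; rounding w up by one element lets the bit
      // tests below select the matching store sequence of 4, 2, and/or 1
      // pixels.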
      w += 1 * sizeof(float);
      if (w & (8 * sizeof(float))) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (4 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (2 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }
179 
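    // Advance to the next output row: with stride 2, the row i2 pointed at
    // when this iteration began becomes the new i0, so rewind i2 by the
    // bytes the main loop consumed.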
    i0 = (const float*) ((uintptr_t) i2 - input_decrement);
    i1 = (const float*) ((uintptr_t) i0 + input_width);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


    output_height -= 1;
    padded_input_height -= 2;
  } while (output_height != 0);
}