// Auto-generated file. Do not edit!
//   Template: src/f32-dwconv2d-chw/3x3p1-wasmsimd-splat.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>


void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4(
    size_t input_height,
    size_t input_width,
    const float* input,
    const float* weights,
    const float* zero,
    float* output,
    uint32_t padding_top,
    const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(input_height != 0);
  assert(input_width != 0);
  assert(input_width % sizeof(float) == 0);
  assert(padding_top == 1);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);

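  // Weight layout (as used by the lane shuffles below): weights[0] is the bias,
  // weights[1..9] are the 3x3 kernel taps in row-major order. The 64-bit splat
  // load duplicates the last two taps into both halves of vw89.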
  const v128_t vw0123 = wasm_v128_load(weights);
  const v128_t vw4567 = wasm_v128_load(weights + 4);
  const v128_t vw89 = wasm_v128_load64_splat(weights + 8);

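  // Rows are consumed in blocks of 4 floats, so each row pointer advances by
  // input_width rounded up to a 16-byte multiple; input_decrement rewinds that
  // over-advance when the row pointers are rotated at the end of each row.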
  const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float));

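  // padding_top == 1, so the top row of the first 3x3 window is the zero page;
  // i1 and i2 are the first two real input rows.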
  const float* i0 = zero;
  const float* i1 = input;
  const float* i2 = (const float*) ((uintptr_t) i1 + input_width);

  float* o0 = output;

  size_t output_height = input_height;
  do {
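    // For the last output row the bottom row of the window falls below the
    // input, so it is read from the zero (padding) page instead.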
    if XNN_UNPREDICTABLE(output_height < 2) {
      i2 = zero;
    }

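    // The "previous block" vectors start at zero so that the left neighbor of
    // column 0 is the implicit zero padding on the left edge.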
    v128_t vi0x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi1x0123 = wasm_f32x4_const_splat(0.0f);
    v128_t vi2x0123 = wasm_f32x4_const_splat(0.0f);

    v128_t vi0x4567 = wasm_v128_load(i0); i0 += 4;
    v128_t vi1x4567 = wasm_v128_load(i1); i1 += 4;
    v128_t vi2x4567 = wasm_v128_load(i2); i2 += 4;

    size_t w = input_width;
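    // Main loop: produce 4 output pixels per iteration while more than one
    // full block of 4 input columns remains; the tail is handled below.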
    for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

      const v128_t vi0x89AB = wasm_v128_load(i0); i0 += 4;
      const v128_t vi1x89AB = wasm_v128_load(i1); i1 += 4;
      const v128_t vi2x89AB = wasm_v128_load(i2); i2 += 4;

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

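      // Splice the previous and current blocks to form the left-neighbor
      // columns (3..6) for the first tap of each kernel row.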
      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

      vi0x0123 = vi0x4567;
      vi1x0123 = vi1x4567;
      vi2x0123 = vi2x4567;

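      // Splice the current and next blocks to form the right-neighbor
      // columns (5..8) for the last tap of each kernel row.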
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vi0x89AB, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vi1x89AB, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vi2x89AB, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));

      vi0x4567 = vi0x89AB;
      vi1x4567 = vi1x89AB;
      vi2x4567 = vi2x89AB;


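      // Clamp the accumulator to [min, max]. The pseudo-min/max intrinsics are
      // used because they map to single MINPS/MAXPS instructions on x86, which
      // is presumably why this is the x86 variant of the wasmsimd kernel.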
      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

      wasm_v128_store(o0, vo0); o0 += 4;
    }
    // Always process the last block of 1..4 pixels.
    assert(w >= 1 * sizeof(float));
    assert(w <= 4 * sizeof(float));
    {
      v128_t vo0p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0);

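      // Zero out the lanes that lie past the end of the row so the
      // out-of-bounds values just read do not contribute to the output.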
      vi0x4567 = wasm_v128_and(vmask, vi0x4567);
      vi1x4567 = wasm_v128_and(vmask, vi1x4567);
      vi2x4567 = wasm_v128_and(vmask, vi2x4567);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)));

      const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
      const v128_t vi1x3456 = wasm_v32x4_shuffle(vi1x0123, vi1x4567, 3, 4, 5, 6);
      const v128_t vi2x3456 = wasm_v32x4_shuffle(vi2x0123, vi2x4567, 3, 4, 5, 6);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3)));

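      // In the final block the right neighbor of the last column is the
      // implicit zero padding on the right edge of the row.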
      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      const v128_t vi0x5678 = wasm_v32x4_shuffle(vi0x4567, vzero, 1, 2, 3, 4);
      const v128_t vi1x5678 = wasm_v32x4_shuffle(vi1x4567, vzero, 1, 2, 3, 4);
      const v128_t vi2x5678 = wasm_v32x4_shuffle(vi2x4567, vzero, 1, 2, 3, 4);

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi0x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi1x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2)));

      vo0p0 = wasm_f32x4_add(vo0p0, wasm_f32x4_mul(vi2x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1)));


      v128_t vo0 = wasm_f32x4_pmax(vmin, vo0p0);
      vo0 = wasm_f32x4_pmin(vmax, vo0);

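      // Store a full block of 4 outputs, or 2 and/or 1 outputs for a partial
      // final block, rotating the vector after each partial store.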
      if XNN_LIKELY(w == 4 * sizeof(float)) {
        wasm_v128_store(o0, vo0); o0 += 4;
      } else {
        if (w & (2 * sizeof(float))) {
          *((double*) o0) = wasm_f64x2_extract_lane(vo0, 0); o0 += 2;

          vo0 = wasm_v32x4_shuffle(vo0, vo0, 2, 3, 0, 1);
        }
        if (w & (1 * sizeof(float))) {
          *o0 = wasm_f32x4_extract_lane(vo0, 0); o0 += 1;
        }
      }
    }

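    // Advance to the next output row: the old middle and bottom rows become the
    // new top and middle rows; input_decrement rewinds each pointer to the
    // start of its row.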
    i0 = (const float*) ((uintptr_t) i1 - input_decrement);
    i1 = (const float*) ((uintptr_t) i2 - input_decrement);
    i2 = (const float*) ((uintptr_t) i1 + input_width);


  } while (--output_height != 0);
}