// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

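// f32 global average pooling micro-kernel: reduces `rows` rows of `channels`
// floats to a single output row in passes of 7 rows each ("7p7x"), processing
// 4 channels per Wasm SIMD vector ("c4"). The accumulated sums are scaled by
// params->scalar.scale and clamped to [params->scalar.min, params->scalar.max].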
void xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4(
    size_t rows,
    size_t channels,
    const float* input,
    size_t input_stride,
    const float* zero,
    float* buffer,
    float* output,
    const union xnn_f32_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows > 7);
  assert(channels != 0);

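  // Set up pointers to the first 7 input rows. Channels are processed in full
  // 4-float vectors, so row-advance arithmetic uses the channel count rounded
  // up to a multiple of 4 (packed_channels); the resulting loads past the end
  // of a row are permitted per the XNN_OOB_READS annotation.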
  const float* i0 = input;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  const float* i2 = (const float*) ((uintptr_t) i1 + input_stride);
  const float* i3 = (const float*) ((uintptr_t) i2 + input_stride);
  const float* i4 = (const float*) ((uintptr_t) i3 + input_stride);
  const float* i5 = (const float*) ((uintptr_t) i4 + input_stride);
  const float* i6 = (const float*) ((uintptr_t) i5 + input_stride);
  const size_t packed_channels = round_up_po2(channels, 4);
  const size_t input_increment = 7 * input_stride - packed_channels * sizeof(float);

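  // First pass: sum the first 7 rows into the packed accumulator buffer,
  // 4 channels per iteration.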
  float* b = buffer;
  for (size_t c = 0; c < channels; c += 4) {
    const v128_t vi0 = wasm_v128_load(i0);
    i0 += 4;
    const v128_t vi1 = wasm_v128_load(i1);
    i1 += 4;
    const v128_t vi2 = wasm_v128_load(i2);
    i2 += 4;
    const v128_t vi3 = wasm_v128_load(i3);
    i3 += 4;
    const v128_t vi4 = wasm_v128_load(i4);
    i4 += 4;
    const v128_t vi5 = wasm_v128_load(i5);
    i5 += 4;
    const v128_t vi6 = wasm_v128_load(i6);
    i6 += 4;

    const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
    const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
    const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);

    const v128_t vsum016 = wasm_f32x4_add(vsum01, vi6);
    const v128_t vsum2345 = wasm_f32x4_add(vsum23, vsum45);

    const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);

    wasm_v128_store(b, vsum);
    b += 4;
  }
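  // Intermediate passes: while more than 7 rows remain, add each further group
  // of 7 rows into the running sums in `buffer`. input_increment steps the row
  // pointers forward by 7 rows while undoing the 4-float advances made by the
  // inner channel loop.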
  for (rows -= 7; rows > 7; rows -= 7) {
    b = buffer;

    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    i2 = (const float*) ((uintptr_t) i2 + input_increment);
    i3 = (const float*) ((uintptr_t) i3 + input_increment);
    i4 = (const float*) ((uintptr_t) i4 + input_increment);
    i5 = (const float*) ((uintptr_t) i5 + input_increment);
    i6 = (const float*) ((uintptr_t) i6 + input_increment);

    for (size_t c = 0; c < channels; c += 4) {
      const v128_t vi0 = wasm_v128_load(i0);
      i0 += 4;
      const v128_t vi1 = wasm_v128_load(i1);
      i1 += 4;
      const v128_t vi2 = wasm_v128_load(i2);
      i2 += 4;
      const v128_t vi3 = wasm_v128_load(i3);
      i3 += 4;
      const v128_t vi4 = wasm_v128_load(i4);
      i4 += 4;
      const v128_t vi5 = wasm_v128_load(i5);
      i5 += 4;
      const v128_t vi6 = wasm_v128_load(i6);
      i6 += 4;
      const v128_t vacc = wasm_v128_load(b);

      const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
      const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
      const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
      const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);

      const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
      const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);

      const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);

      wasm_v128_store(b, vsum); b += 4;
    }
  }

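  // Final pass: between 1 and 7 rows remain. Row pointers beyond the remainder
  // are redirected to the zero vector so that missing rows contribute nothing
  // to the sums.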
  i0 = (const float*) ((uintptr_t) i0 + input_increment);
  i1 = (const float*) ((uintptr_t) i1 + input_increment);
  if (rows < 2) {
    i1 = zero;
  }
  i2 = (const float*) ((uintptr_t) i2 + input_increment);
  if (rows <= 2) {
    i2 = zero;
  }
  i3 = (const float*) ((uintptr_t) i3 + input_increment);
  if (rows < 4) {
    i3 = zero;
  }
  i4 = (const float*) ((uintptr_t) i4 + input_increment);
  if (rows <= 4) {
    i4 = zero;
  }
  i5 = (const float*) ((uintptr_t) i5 + input_increment);
  if (rows < 6) {
    i5 = zero;
  }
  i6 = (const float*) ((uintptr_t) i6 + input_increment);
  if (rows <= 6) {
    i6 = zero;
  }
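  // Load the scaling factor (supplied by the caller, typically the reciprocal
  // of the number of pooled rows) and the output clamping bounds.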
  const v128_t vscale = wasm_v128_load32_splat(&params->scalar.scale);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max);

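  // Add the remaining rows to the buffered sums, then scale, clamp, and store
  // 4 output channels per iteration. pmin/pmax are used for the clamp (rather
  // than wasm_f32x4_min/max) presumably because they lower to single MINPS /
  // MAXPS instructions when the Wasm engine targets x86, hence the _x86 suffix.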
  b = buffer;
  while (channels >= 4) {
    const v128_t vi0 = wasm_v128_load(i0);
    i0 += 4;
    const v128_t vi1 = wasm_v128_load(i1);
    i1 += 4;
    const v128_t vi2 = wasm_v128_load(i2);
    i2 += 4;
    const v128_t vi3 = wasm_v128_load(i3);
    i3 += 4;
    const v128_t vi4 = wasm_v128_load(i4);
    i4 += 4;
    const v128_t vi5 = wasm_v128_load(i5);
    i5 += 4;
    const v128_t vi6 = wasm_v128_load(i6);
    i6 += 4;
    const v128_t vacc = wasm_v128_load(b);
    b += 4;

    const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
    const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
    const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
    const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);

    const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
    const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);

    const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);

    v128_t vout = wasm_f32x4_mul(vsum, vscale);
    vout = wasm_f32x4_pmax(vmin, vout);
    vout = wasm_f32x4_pmin(vmax, vout);

    wasm_v128_store(output, vout);
    output += 4;

    channels -= 4;
  }
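  // Remainder: 1 to 3 trailing channels. A full 4-lane result is still
  // computed (the loads may read past the end of each row, as declared by
  // XNN_OOB_READS), and only the valid lanes are written, 2 and then 1 at a
  // time.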
  if (channels != 0) {
    const v128_t vi0 = wasm_v128_load(i0);
    const v128_t vi1 = wasm_v128_load(i1);
    const v128_t vi2 = wasm_v128_load(i2);
    const v128_t vi3 = wasm_v128_load(i3);
    const v128_t vi4 = wasm_v128_load(i4);
    const v128_t vi5 = wasm_v128_load(i5);
    const v128_t vi6 = wasm_v128_load(i6);
    const v128_t vacc = wasm_v128_load(b);

    const v128_t vsum01 = wasm_f32x4_add(vi0, vi1);
    const v128_t vsum23 = wasm_f32x4_add(vi2, vi3);
    const v128_t vsum45 = wasm_f32x4_add(vi4, vi5);
    const v128_t vsum6a = wasm_f32x4_add(vi6, vacc);

    const v128_t vsum0123 = wasm_f32x4_add(vsum01, vsum23);
    const v128_t vsum456a = wasm_f32x4_add(vsum45, vsum6a);

    const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);

    v128_t vout = wasm_f32x4_mul(vsum, vscale);
    vout = wasm_f32x4_pmax(vmin, vout);
    vout = wasm_f32x4_pmin(vmax, vout);

    if (channels & 2) {
      *((double*) output) = wasm_f64x2_extract_lane(vout, 0);
      output += 2;
      vout = wasm_v32x4_shuffle(vout, vout, 2, 3, 2, 3);
    }
    if (channels & 1) {
      *output++ = wasm_f32x4_extract_lane(vout, 0);
    }
  }
}