// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/argmaxpool.h>

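// Single-pass argmax pooling microkernel: for each output pixel, computes the
// per-channel maximum over up to 9 pooling elements and records which element
// (0..8) produced it. 'input' is an indirection buffer of row pointers, each
// offset by 'input_offset' bytes; 'input_increment' and 'output_increment' are
// byte strides applied between output pixels.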
void xnn_f32_argmaxpool_ukernel_9x__sse2_c4(
    size_t output_pixels,
    size_t pooling_elements,
    size_t channels,
    const float** input,
    size_t input_offset,
    float* output,
    uint32_t* index,
    size_t input_increment,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(pooling_elements != 0);
  assert(pooling_elements <= 9);
  assert(channels != 0);

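  // Each iteration of this loop produces the per-channel maxima and argmax
  // indices for one output pixel.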
  do {
    const float* i0 = input[0];
    const float* i1 = input[1];
    const float* i2 = input[2];
    const float* i3 = input[3];
    const float* i4 = input[4];
    const float* i5 = input[5];
    const float* i6 = input[6];
    const float* i7 = input[7];
    const float* i8 = input[8];
    i0 = (const float*) ((uintptr_t) i0 + input_offset);
    i1 = (const float*) ((uintptr_t) i1 + input_offset);
    i2 = (const float*) ((uintptr_t) i2 + input_offset);
    i3 = (const float*) ((uintptr_t) i3 + input_offset);
    i4 = (const float*) ((uintptr_t) i4 + input_offset);
    i5 = (const float*) ((uintptr_t) i5 + input_offset);
    i6 = (const float*) ((uintptr_t) i6 + input_offset);
    i7 = (const float*) ((uintptr_t) i7 + input_offset);
    i8 = (const float*) ((uintptr_t) i8 + input_offset);
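    // Pooling windows shorter than 9 elements: alias the unused row pointers
    // to i0. A duplicated row never compares strictly greater than the
    // running maximum, so it cannot change the result.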
    if (pooling_elements < 2) {
      i1 = i0;
    }
    if (pooling_elements <= 2) {
      i2 = i0;
    }
    if (pooling_elements < 4) {
      i3 = i0;
    }
    if (pooling_elements <= 4) {
      i4 = i0;
    }
    if (pooling_elements < 6) {
      i5 = i0;
    }
    if (pooling_elements <= 6) {
      i6 = i0;
    }
    if (pooling_elements < 8) {
      i7 = i0;
    }
    if (pooling_elements <= 8) {
      i8 = i0;
    }

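    // Vectorized reduction over the 9 rows, 4 channels at a time: vmax holds
    // the running per-channel maximum and vidx the index of the row that
    // produced it.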
    size_t c = channels;
    for (; c >= 4; c -= 4) {
      const __m128 vi0 = _mm_loadu_ps(i0);
      i0 += 4;
      const __m128 vi1 = _mm_loadu_ps(i1);
      i1 += 4;
      const __m128 vi2 = _mm_loadu_ps(i2);
      i2 += 4;
      const __m128 vi3 = _mm_loadu_ps(i3);
      i3 += 4;
      const __m128 vi4 = _mm_loadu_ps(i4);
      i4 += 4;
      const __m128 vi5 = _mm_loadu_ps(i5);
      i5 += 4;
      const __m128 vi6 = _mm_loadu_ps(i6);
      i6 += 4;
      const __m128 vi7 = _mm_loadu_ps(i7);
      i7 += 4;
      const __m128 vi8 = _mm_loadu_ps(i8);
      i8 += 4;

      __m128 vmax = vi0;
      __m128i vidx = _mm_setzero_si128();

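      // For each row k: build a mask where vi_k > vmax, take the new maximum,
      // and blend k into vidx with an SSE2 bitwise select (andnot keeps the
      // old index lanes, and installs k where the mask is set).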
      const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
      vmax = _mm_max_ps(vi1, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

      const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
      vmax = _mm_max_ps(vi2, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

      const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
      vmax = _mm_max_ps(vi3, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

      const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
      vmax = _mm_max_ps(vi4, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));

      const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
      vmax = _mm_max_ps(vi5, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));

      const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
      vmax = _mm_max_ps(vi6, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));

      const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
      vmax = _mm_max_ps(vi7, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));

      const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
      vmax = _mm_max_ps(vi8, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));

      _mm_storeu_ps(output, vmax);
      output += 4;
      _mm_storeu_si128((__m128i*) index, vidx);
      index += 4;
    }
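    // Remainder: 1 to 3 trailing channels. The unaligned loads may read past
    // the last channel (the kernel is declared XNN_OOB_READS); the stores
    // below write only the valid lanes.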
    if (c != 0) {
      const __m128 vi0 = _mm_loadu_ps(i0);
      const __m128 vi1 = _mm_loadu_ps(i1);
      const __m128 vi2 = _mm_loadu_ps(i2);
      const __m128 vi3 = _mm_loadu_ps(i3);
      const __m128 vi4 = _mm_loadu_ps(i4);
      const __m128 vi5 = _mm_loadu_ps(i5);
      const __m128 vi6 = _mm_loadu_ps(i6);
      const __m128 vi7 = _mm_loadu_ps(i7);
      const __m128 vi8 = _mm_loadu_ps(i8);

      __m128 vmax = vi0;
      __m128i vidx = _mm_setzero_si128();

      const __m128i vm1 = _mm_castps_si128(_mm_cmpgt_ps(vi1, vmax));
      vmax = _mm_max_ps(vi1, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm1, vidx), _mm_and_si128(vm1, _mm_set1_epi32(1)));

      const __m128i vm2 = _mm_castps_si128(_mm_cmpgt_ps(vi2, vmax));
      vmax = _mm_max_ps(vi2, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm2, vidx), _mm_and_si128(vm2, _mm_set1_epi32(2)));

      const __m128i vm3 = _mm_castps_si128(_mm_cmpgt_ps(vi3, vmax));
      vmax = _mm_max_ps(vi3, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm3, vidx), _mm_and_si128(vm3, _mm_set1_epi32(3)));

      const __m128i vm4 = _mm_castps_si128(_mm_cmpgt_ps(vi4, vmax));
      vmax = _mm_max_ps(vi4, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm4, vidx), _mm_and_si128(vm4, _mm_set1_epi32(4)));

      const __m128i vm5 = _mm_castps_si128(_mm_cmpgt_ps(vi5, vmax));
      vmax = _mm_max_ps(vi5, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm5, vidx), _mm_and_si128(vm5, _mm_set1_epi32(5)));

      const __m128i vm6 = _mm_castps_si128(_mm_cmpgt_ps(vi6, vmax));
      vmax = _mm_max_ps(vi6, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm6, vidx), _mm_and_si128(vm6, _mm_set1_epi32(6)));

      const __m128i vm7 = _mm_castps_si128(_mm_cmpgt_ps(vi7, vmax));
      vmax = _mm_max_ps(vi7, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm7, vidx), _mm_and_si128(vm7, _mm_set1_epi32(7)));

      const __m128i vm8 = _mm_castps_si128(_mm_cmpgt_ps(vi8, vmax));
      vmax = _mm_max_ps(vi8, vmax);
      vidx = _mm_or_si128(_mm_andnot_si128(vm8, vidx), _mm_and_si128(vm8, _mm_set1_epi32(8)));

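      // Store the 1-3 valid lanes: two channels at a time, then one, shifting
      // the upper lanes down between stores.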
      if (c & 2) {
        _mm_storel_pi((__m64*) output, vmax);
        _mm_storel_epi64((__m128i*) index, vidx);
        vmax = _mm_movehl_ps(vmax, vmax);
        vidx = _mm_unpackhi_epi64(vidx, vidx);
        output += 2;
        index += 2;
      }
      if (c & 1) {
        _mm_store_ss(output, vmax);
        *index = (uint32_t) _mm_cvtsi128_si32(vidx);
        output += 1;
        index += 1;
      }
    }
    input = (const float**) ((uintptr_t) input + input_increment);
    output = (float*) ((uintptr_t) output + output_increment);
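    // index needs no explicit increment: it already points just past the
    // entries written for this output pixel.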
    index = (uint32_t*) index;
  } while (--output_pixels != 0);
}