// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/maxpool.h>

xnn_f16_maxpool_minmax_ukernel_9p8x__f16c_c8(size_t output_pixels,size_t kernel_elements,size_t channels,const void ** input,size_t input_offset,void * output,size_t input_increment,size_t output_increment,const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])14 void xnn_f16_maxpool_minmax_ukernel_9p8x__f16c_c8(
15 size_t output_pixels,
16 size_t kernel_elements,
17 size_t channels,
18 const void** input,
19 size_t input_offset,
20 void* output,
21 size_t input_increment,
22 size_t output_increment,
23 const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
24 {
25 assert(output_pixels != 0);
26 assert(kernel_elements != 0);
27 assert(channels != 0);
28
29 const __m256 voutput_min = _mm256_load_ps(params->avx.min);
30 const __m256 voutput_max = _mm256_load_ps(params->avx.max);
31 do {
32 uint16_t* o = output;
33 {
34 const uint16_t* i0 = *input++;
35 const uint16_t* i1 = *input++;
36 const uint16_t* i2 = *input++;
37 const uint16_t* i3 = *input++;
38 const uint16_t* i4 = *input++;
39 const uint16_t* i5 = *input++;
40 const uint16_t* i6 = *input++;
41 const uint16_t* i7 = *input++;
42 const uint16_t* i8 = *input++;
43 i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
44 i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
45 i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
46 i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
47 i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
48 i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
49 i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
50 i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
51 i8 = (const uint16_t*) ((uintptr_t) i8 + input_offset);
52 if (kernel_elements < 2) {
53 i1 = i0;
54 }
55 if (kernel_elements <= 2) {
56 i2 = i0;
57 }
58 if (kernel_elements < 4) {
59 i3 = i0;
60 }
61 if (kernel_elements <= 4) {
62 i4 = i0;
63 }
64 if (kernel_elements < 6) {
65 i5 = i0;
66 }
67 if (kernel_elements <= 6) {
68 i6 = i0;
69 }
70 if (kernel_elements < 8) {
71 i7 = i0;
72 }
73 if (kernel_elements <= 8) {
74 i8 = i0;
75 }
76
77 size_t c = channels;
78 for (; c >= 8; c -= 8) {
79 const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
80 i0 += 8;
81 const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
82 i1 += 8;
83 const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
84 i2 += 8;
85 const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
86 i3 += 8;
87 const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
88 i4 += 8;
89 const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
90 i5 += 8;
91 const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
92 i6 += 8;
93 const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
94 i7 += 8;
95 const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
96 i8 += 8;
97
98 const __m256 vmax018 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vi8);
99 const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
100 const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
101 const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
102
103 const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
104 const __m256 vmax01678 = _mm256_max_ps(vmax018, vmax67);
105 const __m256 vmax = _mm256_max_ps(vmax2345, vmax01678);
106 const __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
107
108 _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_NO_EXC));
109 o += 8;
110 }
111 if (c != 0) {
112 const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
113 i0 += 8;
114 const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
115 i1 += 8;
116 const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
117 i2 += 8;
118 const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
119 i3 += 8;
120 const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
121 i4 += 8;
122 const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
123 i5 += 8;
124 const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
125 i6 += 8;
126 const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
127 i7 += 8;
128 const __m256 vi8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i8));
129 i8 += 8;
130
131 const __m256 vmax018 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vi8);
132 const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
133 const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
134 const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
135
136 const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
137 const __m256 vmax01678 = _mm256_max_ps(vmax018, vmax67);
138 const __m256 vmax = _mm256_max_ps(vmax2345, vmax01678);
139 __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
140
141 __m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_NO_EXC);
142 if (c & 4) {
143 _mm_storel_epi64((__m128i*) o, vh);
144 vh = _mm_unpackhi_epi64(vh, vh);
145 o += 4;
146 }
147 if (c & 2) {
148 _mm_storeu_si32(o, vh);
149 vh = _mm_srli_epi64(vh, 32);
150 o += 2;
151 }
152 if (c & 1) {
153 *o = _mm_extract_epi16(vh, 0);
154 o += 1;
155 }
156 }
157 }
158
159 for (ptrdiff_t k = (ptrdiff_t) kernel_elements - 9; k > 0; k -= 8) {
160 const uint16_t* i0 = *input++;
161 const uint16_t* i1 = *input++;
162 const uint16_t* i2 = *input++;
163 const uint16_t* i3 = *input++;
164 const uint16_t* i4 = *input++;
165 const uint16_t* i5 = *input++;
166 const uint16_t* i6 = *input++;
167 const uint16_t* i7 = *input++;
168 i0 = (const uint16_t*) ((uintptr_t) i0 + input_offset);
169 i1 = (const uint16_t*) ((uintptr_t) i1 + input_offset);
170 i2 = (const uint16_t*) ((uintptr_t) i2 + input_offset);
171 i3 = (const uint16_t*) ((uintptr_t) i3 + input_offset);
172 i4 = (const uint16_t*) ((uintptr_t) i4 + input_offset);
173 i5 = (const uint16_t*) ((uintptr_t) i5 + input_offset);
174 i6 = (const uint16_t*) ((uintptr_t) i6 + input_offset);
175 i7 = (const uint16_t*) ((uintptr_t) i7 + input_offset);
176 if (k < 2) {
177 i1 = i0;
178 }
179 if (k <= 2) {
180 i2 = i0;
181 }
182 if (k < 4) {
183 i3 = i0;
184 }
185 if (k <= 4) {
186 i4 = i0;
187 }
188 if (k < 6) {
189 i5 = i0;
190 }
191 if (k <= 6) {
192 i6 = i0;
193 }
194 if (k < 8) {
195 i7 = i0;
196 }
197
198 o = output;
199 size_t c = channels;
200 for (; c >= 8; c -= 8) {
201 const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
202 i0 += 8;
203 const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
204 i1 += 8;
205 const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
206 i2 += 8;
207 const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
208 i3 += 8;
209 const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
210 i4 += 8;
211 const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
212 i5 += 8;
213 const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
214 i6 += 8;
215 const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
216 i7 += 8;
217 const __m256 vo = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) o));
218
219 const __m256 vmax01 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vo);
220 const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
221 const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
222 const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
223
224 const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
225 const __m256 vmax0167 = _mm256_max_ps(vmax01, vmax67);
226 const __m256 vmax = _mm256_max_ps(vmax2345, vmax0167);
227 const __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
228
229 _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vout, _MM_FROUND_NO_EXC));
230 o += 8;
231 }
232 if (c != 0) {
233 const __m256 vi0 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i0));
234 const __m256 vi1 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i1));
235 const __m256 vi2 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i2));
236 const __m256 vi3 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i3));
237 const __m256 vi4 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i4));
238 const __m256 vi5 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i5));
239 const __m256 vi6 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i6));
240 const __m256 vi7 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i7));
241 const __m256 vo = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) o));
242
243 const __m256 vmax01 = _mm256_max_ps(_mm256_max_ps(vi0, vi1), vo);
244 const __m256 vmax23 = _mm256_max_ps(vi2, vi3);
245 const __m256 vmax45 = _mm256_max_ps(vi4, vi5);
246 const __m256 vmax67 = _mm256_max_ps(vi6, vi7);
247
248 const __m256 vmax2345 = _mm256_max_ps(vmax23, vmax45);
249 const __m256 vmax0167 = _mm256_max_ps(vmax01, vmax67);
250 const __m256 vmax = _mm256_max_ps(vmax2345, vmax0167);
251 __m256 vout = _mm256_max_ps(_mm256_min_ps(vmax, voutput_max), voutput_min);
252
253 __m128i vh = _mm256_cvtps_ph(vout, _MM_FROUND_NO_EXC);
254 if (c & 4) {
255 _mm_storel_epi64((__m128i*) o, vh);
256 vh = _mm_unpackhi_epi64(vh, vh);
257 o += 4;
258 }
259 if (c & 2) {
260 _mm_storeu_si32(o, vh);
261 vh = _mm_srli_epi64(vh, 32);
262 o += 2;
263 }
264 if (c & 1) {
265 *o = _mm_extract_epi16(vh, 0);
266 o += 1;
267 }
268 }
269 }
270 input = (const void**) ((uintptr_t) input + input_increment);
271 output = (uint16_t*) ((uintptr_t) o + output_increment);
272 } while (--output_pixels != 0);
273 }
274