/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/PoolingLayerDataset.h"
#include "tests/datasets/PoolingTypesDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/PoolingLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Input data sets for floating-point data types */
const auto PoolingLayerDatasetFP = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3), Size2D(5, 7) })),
                                                   framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
                                           framework::dataset::make("ExcludePadding", { true, false }));

const auto PoolingLayerDatasetFPSmall = combine(combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
                                                        framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0) })),
                                                framework::dataset::make("ExcludePadding", { true, false }));

/** Input data sets for asymmetric quantized data types */
const auto PoolingLayerDatasetQASYMM8 = combine(concat(combine(combine(framework::dataset::make("PoolingType",
                                                                       {
                                                                           PoolingType::MAX, PoolingType::AVG,
                                                                       }),
                                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(3, 3) })),
                                                               framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) })),
                                                       combine(combine(framework::dataset::make("PoolingType", { PoolingType::AVG }), framework::dataset::make("PoolingSize", { Size2D(5, 7) })), framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) }))),
                                                framework::dataset::make("ExcludePadding", { true }));

const auto PoolingLayerDatasetQASYMM8Small = combine(combine(combine(framework::dataset::make("PoolingType",
                                                                     {
                                                                         PoolingType::MAX, PoolingType::AVG,
                                                                     }),
                                                                     framework::dataset::make("PoolingSize", { Size2D(2, 2), Size2D(5, 7) })),
                                                             framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
                                                     framework::dataset::make("ExcludePadding", { true }));

const auto PoolingLayerDatasetFPIndicesSmall = combine(combine(combine(framework::dataset::make("PoolingType",
                                                                       { PoolingType::MAX }),
                                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
                                                               framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 2, 0, 0) })),
                                                       framework::dataset::make("ExcludePadding", { true, false }));

constexpr AbsoluteTolerance<float>   tolerance_f32(0.001f);  /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
constexpr AbsoluteTolerance<float>   tolerance_f16(0.01f);   /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);   /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric type */
constexpr AbsoluteTolerance<int8_t>  tolerance_qasymm8_s(1); /**< Tolerance value for comparing reference's output against implementation's output for 8-bit signed asymmetric type */
const auto pool_data_layout_dataset = framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC });

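// FpMixedPrecision selects whether FP16 pooling accumulates in wider (FP32) precision; both settings are swept in the FP16 tests below.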
const auto pool_fp_mixed_precision_dataset = framework::dataset::make("FpMixedPrecision", { true, false });

} // namespace

TEST_SUITE(CL)
TEST_SUITE(PoolingLayer)

// *INDENT-OFF*
// clang-format off
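// Each entry below pairs an input/output TensorInfo with a PoolingLayerInfo; "Expected" states whether
// CLPoolingLayer::validate() should accept the configuration (the per-entry comments describe the intended failure).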
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Mismatching data type
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),     // Invalid pad/size combination
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid parameters
                                                       TensorInfo(TensorShape(15U, 13U, 5U), 1, DataType::F32),     // Non-rectangular Global Pooling
                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),     // Invalid output Global Pooling
                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::QASYMM8),
                                                       TensorInfo(TensorShape(13U, 13U, 5U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(1U, 16U, 1U), 1, DataType::F32),
                                                     }),
               framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(25U, 11U, 2U), 1, DataType::F16),
                                                       TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(25U, 16U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(2U, 2U, 5U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(12U, 12U, 5U), 1, DataType::QASYMM8),
                                                       TensorInfo(TensorShape(1U, 1U, 5U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(1U, 15U, 1U), 1, DataType::F32),
                                                     })),
               framework::dataset::make("PoolInfo", { PoolingLayerInfo(PoolingType::AVG, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
                                                      PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 2, 0)),
                                                      PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 2)),
                                                      PoolingLayerInfo(PoolingType::L2, 3, DataLayout::NCHW, PadStrideInfo(1, 1, 0, 0)),
                                                      PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
                                                      PoolingLayerInfo(PoolingType::MAX, DataLayout::NCHW),
                                                      PoolingLayerInfo(PoolingType::AVG, 2, DataLayout::NHWC, PadStrideInfo(), false),
                                                      PoolingLayerInfo(PoolingType::AVG, DataLayout::NCHW),
                                                      PoolingLayerInfo(PoolingType::MAX, 2, DataLayout::NHWC, PadStrideInfo(1, 1, 0, 0), false),
                                                    })),
               framework::dataset::make("Expected", { false, false, false, false, true, false, true, true, false })),
               input_info, output_info, pool_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(CLPoolingLayer::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), pool_info)) == expected, framework::LogLevel::ERRORS);
}

// clang-format on
// *INDENT-ON*

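// Fixture aliases instantiating the generic pooling-layer validation fixtures for the CL backend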
template <typename T>
using CLPoolingLayerFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
template <typename T>
using CLPoolingLayerMixedDataLayoutFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;

template <typename T>
using CLSpecialPoolingLayerFixture = SpecialPoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;

template <typename T>
using CLMixedPrecisionPoolingLayerFixture = PoolingLayerValidationMixedPrecisionFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;

template <typename T>
using CLPoolingLayerIndicesFixture = PoolingLayerIndicesValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;

TEST_SUITE(Float)
TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSpecial, CLSpecialPoolingLayerFixture<float>, framework::DatasetMode::ALL, datasets::PoolingLayerDatasetSpecial() * framework::dataset::make("DataType", DataType::F32))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(), combine(PoolingLayerDatasetFPSmall,
                                                                                                                   framework::dataset::make("DataType", DataType::F32))),
                                                                                                           pool_data_layout_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerMixedDataLayoutFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                                    combine(combine(combine(combine(datasets::PoolingTypes(),
                                                                                                                                            framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
                                                                                                                                            framework::dataset::make("PadStride", { PadStrideInfo(2, 1, 0, 0) })),
                                                                                                                                            framework::dataset::make("ExcludePadding", { false })),
                                                                                                                                            framework::dataset::make("DataType", DataType::F32))),
                                                                                                                                    pool_data_layout_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
                                                                                                                 framework::dataset::make("DataType", DataType::F32))),
                                                                                                         pool_data_layout_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

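// The indices fixture also extracts the positions of the max elements and checks them against the reference indices.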
FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                        combine(PoolingLayerDatasetFPIndicesSmall,
                                                                                                                                framework::dataset::make("DataType", DataType::F32))),
                                                                                                                        pool_data_layout_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
    validate(CLAccessor(_target_indices), _ref_indices);
}

TEST_SUITE(GlobalPooling)
// *INDENT-OFF*
// clang-format off
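// Global pooling: the pooling window matches the input's spatial dimensions, so each channel reduces to a single value.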
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                               framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
                                                                        TensorShape(27U, 13U, 2U, 4U)
                                                                      }),
                               framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                               framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
                               framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                               framework::dataset::make("ExcludePadding", false)),
                               framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", DataLayout::NHWC)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(
                               framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
                                                                        TensorShape(79U, 37U, 11U, 4U)
                                                                      }),
                               framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                               framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
                               framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                               framework::dataset::make("ExcludePadding", false)),
                               framework::dataset::make("DataType", DataType::F32)),
                               framework::dataset::make("DataLayout", DataLayout::NHWC)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // GlobalPooling

TEST_SUITE_END() // FP32

TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLMixedPrecisionPoolingLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                        combine(PoolingLayerDatasetFPSmall,
                                                                                                                                framework::dataset::make("DataType", DataType::F16))),
                                                                                                                        pool_data_layout_dataset),
                                                                                                                        pool_fp_mixed_precision_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, CLMixedPrecisionPoolingLayerFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
                                                                                                                      framework::dataset::make("DataType", DataType::F16))),
                                                                                                                      pool_data_layout_dataset),
                                                                                                                      pool_fp_mixed_precision_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunSmallIndices, CLPoolingLayerIndicesFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                       combine(PoolingLayerDatasetFPIndicesSmall,
                                                                                                                               framework::dataset::make("DataType", DataType::F16))),
                                                                                                                       pool_data_layout_dataset))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f32);
    validate(CLAccessor(_target_indices), _ref_indices);
}

TEST_SUITE(GlobalPooling)
// *INDENT-OFF*
// clang-format off
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<half>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(combine(
                               framework::dataset::make("InputShape", { TensorShape(27U, 13U, 2U),
                                                                        TensorShape(27U, 13U, 2U, 4U)
                                                                      }),
                               framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                               framework::dataset::make("PoolingSize", { Size2D(27, 13) })),
                               framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                               framework::dataset::make("ExcludePadding", false)),
                               framework::dataset::make("DataType", DataType::F16)),
                               framework::dataset::make("DataLayout", DataLayout::NHWC)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16);
}

FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(combine(combine(
                               framework::dataset::make("InputShape", { TensorShape(79U, 37U, 11U),
                                                                        TensorShape(79U, 37U, 11U, 4U)
                                                                      }),
                               framework::dataset::make("PoolingType", { PoolingType::AVG, PoolingType::L2, PoolingType::MAX })),
                               framework::dataset::make("PoolingSize", { Size2D(79, 37) })),
                               framework::dataset::make("PadStride", PadStrideInfo(1, 1, 0, 0))),
                               framework::dataset::make("ExcludePadding", false)),
                               framework::dataset::make("DataType", DataType::F16)),
                               framework::dataset::make("DataLayout", DataLayout::NHWC)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_f16);
}
// clang-format on
// *INDENT-ON*
TEST_SUITE_END() // GlobalPooling

TEST_SUITE_END() // FP16
TEST_SUITE_END() // Float

TEST_SUITE(Quantized)

template <typename T>
using CLPoolingLayerQuantizedFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
template <typename T>
using CLPoolingLayerQuantizedMixedDataLayoutFixture = PoolingLayerValidationQuantizedFixture<CLTensor, CLAccessor, CLPoolingLayer, T, true>;

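// Some combinations below use different input and output QuantizationInfo, so requantization of the pooled result is exercised as well.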
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                     combine(PoolingLayerDatasetQASYMM8Small,
                                                                                                                             framework::dataset::make("DataType", DataType::QASYMM8))),
                                                                                                                     pool_data_layout_dataset),
                                                                                                                     framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10), QuantizationInfo(1.f / 255.f, 10) })),
                                                                                                                     framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5), QuantizationInfo(1.f / 255.f, 10) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                                               combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
                                                                                                                                                       framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
                                                                                                                                                       framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
                                                                                                                                                       framework::dataset::make("ExcludePadding", { true })),
                                                                                                                                                       framework::dataset::make("DataType", DataType::QASYMM8))),
                                                                                                                                               framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
                                                                                                                                               framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 255.f, 10) })),
                                                                                                                                               framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 255.f, 5) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                    combine(PoolingLayerDatasetQASYMM8Small,
                                                                                                                            framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
                                                                                                                    pool_data_layout_dataset),
                                                                                                                    framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10), QuantizationInfo(1.f / 127.f, -10) })),
                                                                                                                    framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -5), QuantizationInfo(1.f / 127.f, -10) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
}
FIXTURE_DATA_TEST_CASE(RunMixedDataLayout, CLPoolingLayerQuantizedMixedDataLayoutFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(datasets::SmallNoneUnitShapes(),
                                                                                                                                              combine(combine(combine(combine(framework::dataset::make("PoolingType", { PoolingType::MAX, PoolingType::AVG }),
                                                                                                                                                      framework::dataset::make("PoolingSize", { Size2D(2, 2) })),
                                                                                                                                                      framework::dataset::make("PadStride", { PadStrideInfo(1, 2, 1, 1) })),
                                                                                                                                                      framework::dataset::make("ExcludePadding", { true })),
                                                                                                                                                      framework::dataset::make("DataType", DataType::QASYMM8_SIGNED))),
                                                                                                                                              framework::dataset::make("DataLayout", { DataLayout::NHWC, DataLayout::NCHW })),
                                                                                                                                              framework::dataset::make("InputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })),
                                                                                                                                              framework::dataset::make("OutputQuantInfo", { QuantizationInfo(1.f / 127.f, -10) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qasymm8_s);
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE_END() // Quantized
TEST_SUITE_END() // PoolingLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute