/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ReductionOperationFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
/** Tolerance for float operations */
AbsoluteTolerance<float> tolerance_f32(0.0001f);
RelativeTolerance<float> rel_tolerance_f32(0.0001f);
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
AbsoluteTolerance<float> tolerance_f16(0.2f);
RelativeTolerance<float> rel_tolerance_f16(0.1f);
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
/** Tolerance for quantized operations */
RelativeTolerance<float> tolerance_quantized(1.f);

const auto ReductionOperations = framework::dataset::make("ReductionOperation",
{
    ReductionOperation::SUM,
    ReductionOperation::PROD,
    ReductionOperation::MIN,
    ReductionOperation::MAX,
});
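// Note: SUM_SQUARE is only exercised by the Validate test below; the ARG_IDX_* reductions are not part of this dataset.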

const auto QuantizationInfos = framework::dataset::make("QuantizationInfo",
{
    QuantizationInfo(1.f / 117, 10), // Numbers chosen so that the quantized values are in range of qasymm8_signed data type
    QuantizationInfo(1.f / 64, 5),
    QuantizationInfo(1.f / 32, 2)
});

const auto Axises = framework::dataset::make("Axis",
{ 0, 1, 2, 3 });

const auto KeepDims = framework::dataset::make("KeepDims", { true, false });

} // namespace

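/** Rough sketch of the configure/run flow that the fixtures below exercise (illustrative only;
 *  the tensor names and the 128x64 / F32 / axis-0 configuration are placeholders, not test data):
 *
 * @code
 * Tensor src = create_tensor<Tensor>(TensorShape(128U, 64U), DataType::F32, 1, QuantizationInfo());
 * Tensor dst = create_tensor<Tensor>(TensorShape(1U, 64U), DataType::F32, 1, QuantizationInfo());
 *
 * NEReductionOperation reduction;
 * reduction.configure(&src, &dst, 0, ReductionOperation::SUM, true); // axis = 0, keep_dims = true
 *
 * src.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill src ...
 * reduction.run();
 * @endcode
 */
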
TEST_SUITE(NEON)
TEST_SUITE(ReductionOperation)

// *INDENT-OFF*
// clang-format off
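// Check the static NEReductionOperation::validate() against a set of invalid configurations
// (mismatching input/output data types, multi-channel input, unsupported data type, out-of-range axis,
// and a kept reduction dimension when keep_dims = false).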
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
    framework::dataset::make("InputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
                                            TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
                                            TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)  // Kept dimension when keep_dims = false
                                          }),
    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(1U, 64U), 1, DataType::F16),
                                             TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                             TensorInfo(TensorShape(1U, 64U), 1, DataType::S16),
                                             TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                             TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                             TensorInfo(TensorShape(1U, 64U), 1, DataType::F32)
                                           })),
    framework::dataset::make("Axis", { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 0U, 0U })),
    framework::dataset::make("KeepDims", { true, true, true, true, true, false })),
    framework::dataset::make("Expected", { false, false, false, false, true, false })),
    input_info, output_info, axis, keep_dims, expected)
{
    bool is_valid = bool(NEReductionOperation::validate(&input_info.clone()->set_is_resizable(false),
                                                        &output_info.clone()->set_is_resizable(true),
                                                        axis,
                                                        ReductionOperation::SUM_SQUARE,
                                                        keep_dims));
    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
}

DATA_TEST_CASE(ValidateNoPadding, framework::DatasetMode::ALL, combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis",
{ 0, 1 })), framework::dataset::make("ReductionOperation", { ReductionOperation::SUM })), KeepDims),
               shape, data_type, axis, op, keep_dims)
{
    TensorShape input_shape    = TensorShape(shape);
    TensorInfo  input_info     = TensorInfo(input_shape, 1, data_type);
    const bool  is_arg_min_max = (op == ReductionOperation::ARG_IDX_MAX) || (op == ReductionOperation::ARG_IDX_MIN);
    const bool  _keep_dims     = keep_dims && !is_arg_min_max;
    const TensorShape output_shape = arm_compute::misc::shape_calculator::compute_reduced_shape(shape, axis, _keep_dims);

    // Create tensors
    Tensor src = create_tensor<Tensor>(input_shape, data_type, 1, QuantizationInfo());
    Tensor dst = create_tensor<Tensor>(output_shape, data_type, 1, QuantizationInfo());

    // Create and configure function
    NEReductionOperation reduction;
    reduction.configure(&src, &dst, axis, op, _keep_dims);

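    // Configuring the reduction must not request any padding on either tensor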
    validate(src.info()->padding(), PaddingSize(0, 0, 0, 0));
    validate(dst.info()->padding(), PaddingSize(0, 0, 0, 0));
}
// clang-format on
// *INDENT-ON*

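/** Fixture that runs NEReductionOperation on generated inputs (_target) and compares the result
 *  against the reference implementation (_reference). */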
template <typename T>
using NEReductionOperationFixture = ReductionOperationFixture<Tensor, Accessor, NEReductionOperation, T>;

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations), KeepDims))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f32);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), Axises), ReductionOperations), KeepDims))
{
    // Validate output
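    // Large shapes are checked against the relative tolerance, with tolerance_f32 as an absolute
    // fallback for values close to zero (the 0 is the allowed mismatch ratio)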
    validate(Accessor(_target), _reference, rel_tolerance_f32, 0, tolerance_f32);
}
TEST_SUITE_END() // FP32

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(FP16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture<half>, framework::DatasetMode::PRECOMMIT,
                       combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), Axises), ReductionOperations), KeepDims))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_f16);
}
FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture<half>, framework::DatasetMode::NIGHTLY,
                       combine(combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), Axises), ReductionOperations), KeepDims))
{
    // Validate output
    validate(Accessor(_target), _reference, rel_tolerance_f16, 0, tolerance_f16);
}
TEST_SUITE_END() // FP16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

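/** Fixture for the quantized data types; it additionally sweeps the QuantizationInfos defined above.
 *
 *  Roughly, the quantized tensors carry a scale/offset when created (shape and values illustrative only), e.g.:
 *
 * @code
 * Tensor src = create_tensor<Tensor>(TensorShape(2U, 3U, 4U, 5U), DataType::QASYMM8, 1, QuantizationInfo(1.f / 64, 5));
 * @endcode
 */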
template <typename T>
using NEReductionOperationQuantizedFixture = ReductionOperationQuantizedFixture<Tensor, Accessor, NEReductionOperation, T>;

TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture<uint8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), Axises),
                                               ReductionOperations),
                                       QuantizationInfos),
                               KeepDims))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quantized);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationQuantizedFixture<int8_t>, framework::DatasetMode::ALL,
                       combine(combine(combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)), Axises),
                                               ReductionOperations),
                                       QuantizationInfos),
                               KeepDims))
{
    // Validate output
    validate(Accessor(_target), _reference, tolerance_quantized);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // ReductionOperation
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute