/*
 * Copyright (c) 2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifdef __aarch64__

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/functions/NEAddMulAdd.h"
#include "arm_compute/runtime/Tensor.h"
#include "arm_compute/runtime/TensorAllocator.h"

#include "tests/NEON/Accessor.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/AddMulAddFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
constexpr AbsoluteTolerance<float> tolerance_fp32(0.001f);    /**< Tolerance for floating point tests */
const AbsoluteTolerance<half>      tolerance_fp16(half(0.1f)); /**< Tolerance for 16-bit floating point tests */
constexpr AbsoluteTolerance<float> tolerance_quant(1);        /**< Tolerance for quantized tests */

const auto ActivationFunctionsDataset = framework::dataset::make("ActivationInfo",
{
    ActivationLayerInfo(),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),

    // Boundaries are aligned with Quantized Data ranges -- DOUBLE check before changing
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f),
    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 8.f, -2.f)
});
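// Concretely, BOUNDED_RELU with upper bound 6.f clamps the output to [0, 6] and LU_BOUNDED_RELU with
// bounds (8.f, -2.f) clamps it to [-2, 8]; both intervals fit inside the quantized final-output ranges
// chosen further below (see the FinalOutputInfo comments).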

// QASYMM8 test quantizations
const auto qasymm8_input1_qinfo_set = framework::dataset::make("Input1QInfo", { QuantizationInfo(0.1, 10) }); // Representable Range: [-1, 24.5]
const auto qasymm8_input2_qinfo_set = framework::dataset::make("Input2QInfo", { QuantizationInfo(0.2, 60) }); // Representable Range: [-12, 39]
const auto qasymm8_bn_mul_qinfo_set = framework::dataset::make("BnMulInfo", { QuantizationInfo(0.001, 55) }); // Representable Range: [-0.055, 0.2]
const auto qasymm8_bn_add_qinfo_set = framework::dataset::make("BnAddInfo", { QuantizationInfo(0.02, 20) }); // Representable Range: [-0.4, 4.7]

// Representable Range: [-9.36, 51.84], Expected F32 range: [-13, 63.5], leaving some space for saturation
const auto qasymm8_add_output_qinfo_set = framework::dataset::make("AddOutputInfo", { QuantizationInfo(0.24, 39) });

// Representable Range: [-4.8, 10.5], Expected FP32 range: [-6.985, 12.7], leaving some space for saturation
// This range also makes sense with the activation boundaries above, i.e. [-2, 8] for LU_BOUNDED_RELU and [0, 6] for BOUNDED_RELU
const auto qasymm8_final_output_qinfo_set = framework::dataset::make("FinalOutputInfo", { QuantizationInfo(0.06, 80) });
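// A worked example of how the "Representable Range" comments above are derived, assuming the usual
// asymmetric dequantization rule real = scale * (q - offset) with q in [0, 255] for QASYMM8:
//   Input1 QuantizationInfo(0.1, 10): 0.1 * (0 - 10) = -1  and 0.1 * (255 - 10) = 24.5 -> [-1, 24.5]
//   Input2 QuantizationInfo(0.2, 60): 0.2 * (0 - 60) = -12 and 0.2 * (255 - 60) = 39   -> [-12, 39]
// The expected F32 range of the addition is then the sum of the two input ranges:
//   [-1 + (-12), 24.5 + 39] = [-13, 63.5], which the AddOutputInfo range above deliberately undershoots.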

// QASYMM8_SIGNED test quantizations
const auto qasymm8_signed_input1_qinfo_set = framework::dataset::make("Input1QInfo", { QuantizationInfo(0.1, 10) }); // Representable Range: [-13.8, 11.7]
const auto qasymm8_signed_input2_qinfo_set = framework::dataset::make("Input2QInfo", { QuantizationInfo(0.2, -60) }); // Representable Range: [-13.6, 37.4]
const auto qasymm8_signed_bn_mul_qinfo_set = framework::dataset::make("BnMulInfo", { QuantizationInfo(0.001, 55) }); // Representable Range: [-0.183, 0.072]
const auto qasymm8_signed_bn_add_qinfo_set = framework::dataset::make("BnAddInfo", { QuantizationInfo(0.4, -120) }); // Representable Range: [-3.2, 98.8]

// Representable Range: [-21.36, 39.84], Expected F32 range: [-27.4, 49.1], leaving some space for saturation
const auto qasymm8_signed_add_output_qinfo_set = framework::dataset::make("AddOutputInfo", { QuantizationInfo(0.24, -39) });

// Representable Range: [-4.8, 10.5], Expected FP32 range: [-9.6713, 14.0942], leaving some space for saturation
// This range also makes sense with the activation boundaries above, i.e. [-2, 8] for LU_BOUNDED_RELU and [0, 6] for BOUNDED_RELU
const auto qasymm8_signed_final_output_qinfo_set = framework::dataset::make("FinalOutputInfo", { QuantizationInfo(0.06, -48) });
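// The same derivation applies to the signed ranges above, with q in [-128, 127] for QASYMM8_SIGNED:
//   Input1 QuantizationInfo(0.1, 10):   0.1 * (-128 - 10)   = -13.8  and 0.1 * (127 - 10)   = 11.7  -> [-13.8, 11.7]
//   BnMul  QuantizationInfo(0.001, 55): 0.001 * (-128 - 55) = -0.183 and 0.001 * (127 - 55) = 0.072 -> [-0.183, 0.072]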

} // namespace

TEST_SUITE(NEON)
TEST_SUITE(AddMulAdd)
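// The function under test fuses an element-wise addition with a batch-norm style multiply-add:
// roughly interm = input1 + input2; output = act(interm * bn_mul + bn_add). A minimal usage sketch
// (the exact configure() signature here is an assumption, not verified against the header):
//
//   NEAddMulAdd add_mul_add;
//   add_mul_add.configure(&input1, &input2, &bn_mul, &bn_add, &interm_output /* may be nullptr */, &final_output,
//                         ConvertPolicy::SATURATE, ActivationLayerInfo());
//   add_mul_add.run();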

template <typename T>
using NEAddMulAddFloatFixture = AddMulAddFloatValidationFixture<Tensor, Accessor, NEAddMulAdd, T, true>;

template <typename T>
using NEAddMulAddFloatFixtureWoIntermOut = AddMulAddFloatValidationFixture<Tensor, Accessor, NEAddMulAdd, T, false>;
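// Note: the trailing bool template parameter of the validation fixture selects whether the intermediate
// addition result is requested as an extra output; the "WoIntermOut" alias disables it and is exercised
// by the RunSmallWithoutIntermOutput case below, where the intermediate output tensor is nullptr.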

TEST_SUITE(Float)

TEST_SUITE(F32)
FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulAddFloatFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
                       framework::dataset::make("DataType", DataType::F32)),
                       ActivationFunctionsDataset))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
    validate(Accessor(_target), _reference, tolerance_fp32);
}

// This test is to stress the case when there is no intermediate output required (i.e. nullptr)
FIXTURE_DATA_TEST_CASE(RunSmallWithoutIntermOutput, NEAddMulAddFloatFixtureWoIntermOut<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
                       framework::dataset::make("DataType", DataType::F32)),
                       framework::dataset::make("ActivationInfo", { ActivationLayerInfo() })))
{
    // Validate outputs
    validate(Accessor(_target), _reference, tolerance_fp32);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulAddFloatFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
                       framework::dataset::make("DataType", DataType::F32)),
                       ActivationFunctionsDataset))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
    validate(Accessor(_target), _reference, tolerance_fp32);
}

TEST_SUITE_END() // F32
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_SUITE(F16)
FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulAddFloatFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallShapes(),
                       framework::dataset::make("DataType", DataType::F16)),
                       ActivationFunctionsDataset))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
    validate(Accessor(_target), _reference, tolerance_fp16);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulAddFloatFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(),
                       framework::dataset::make("DataType", DataType::F16)),
                       ActivationFunctionsDataset))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference); // Arithmetic Addition has more strict tolerance
    validate(Accessor(_target), _reference, tolerance_fp16);
}
TEST_SUITE_END() // F16
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

TEST_SUITE_END() // Float
template <typename T>
using NEAddMulQuantizedFixture = AddMulAddQuantizedValidationFixture<Tensor, Accessor, NEAddMulAdd, T, true>;

template <typename T>
using NEAddMulAddQuantizedFixtureWoIntermOut = AddMulAddQuantizedValidationFixture<Tensor, Accessor, NEAddMulAdd, T, false>;

TEST_SUITE(Quantized)

TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine(datasets::SmallShapes(),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       ActivationFunctionsDataset),
                       qasymm8_input1_qinfo_set),
                       qasymm8_input2_qinfo_set),
                       qasymm8_bn_mul_qinfo_set),
                       qasymm8_bn_add_qinfo_set),
                       qasymm8_add_output_qinfo_set),
                       qasymm8_final_output_qinfo_set))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
    validate(Accessor(_target), _reference, tolerance_quant);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulQuantizedFixture<uint8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
                       framework::dataset::make("DataType", DataType::QASYMM8)),
                       ActivationFunctionsDataset),
                       qasymm8_input1_qinfo_set),
                       qasymm8_input2_qinfo_set),
                       qasymm8_bn_mul_qinfo_set),
                       qasymm8_bn_add_qinfo_set),
                       qasymm8_add_output_qinfo_set),
                       qasymm8_final_output_qinfo_set))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8

TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, NEAddMulQuantizedFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(combine(combine(combine(combine(combine(datasets::SmallShapes(),
                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                       ActivationFunctionsDataset),
                       qasymm8_signed_input1_qinfo_set),
                       qasymm8_signed_input2_qinfo_set),
                       qasymm8_signed_bn_mul_qinfo_set),
                       qasymm8_signed_bn_add_qinfo_set),
                       qasymm8_signed_add_output_qinfo_set),
                       qasymm8_signed_final_output_qinfo_set))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
    validate(Accessor(_target), _reference, tolerance_quant);
}

FIXTURE_DATA_TEST_CASE(RunLarge, NEAddMulQuantizedFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(combine(combine(datasets::LargeShapes(),
                       framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                       ActivationFunctionsDataset),
                       qasymm8_signed_input1_qinfo_set),
                       qasymm8_signed_input2_qinfo_set),
                       qasymm8_signed_bn_mul_qinfo_set),
                       qasymm8_signed_bn_add_qinfo_set),
                       qasymm8_signed_add_output_qinfo_set),
                       qasymm8_signed_final_output_qinfo_set))
{
    // Validate outputs
    validate(Accessor(_interm_target), _interm_reference, tolerance_quant);
    validate(Accessor(_target), _reference, tolerance_quant);
}
TEST_SUITE_END() // QASYMM8_SIGNED

TEST_SUITE_END() // Quantized

TEST_SUITE_END() // AddMulAdd
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute

#endif // __aarch64__