/*
 * Copyright (c) 2017-2020, 2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "arm_compute/runtime/CL/functions/CLActivationLayer.h"
#include "arm_compute/runtime/RuntimeContext.h"
#include "tests/CL/CLAccessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/datasets/ActivationFunctionsDataset.h"
#include "tests/datasets/ShapeDatasets.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"
#include "tests/validation/fixtures/ActivationLayerFixture.h"

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
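/** Absolute tolerance used when validating QSYMM16 results (see the QSYMM16 suite below). */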
constexpr AbsoluteTolerance<float> tolerance_qsymm16(1.f);

/** Define tolerance of the activation layer.
 *
 * @param[in] activation The activation function used.
 * @param[in] data_type  Data type.
 *
 * @return Tolerance depending on the activation function.
 */
AbsoluteTolerance<float> tolerance(ActivationLayerInfo::ActivationFunction activation, DataType data_type)
{
    constexpr float epsilon = 1e-6f;

    switch(activation)
    {
        case ActivationLayerInfo::ActivationFunction::LINEAR:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.2f : epsilon);
        case ActivationLayerInfo::ActivationFunction::SQUARE:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.1f : epsilon);
        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : epsilon);
        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.00001f : epsilon);
        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
        case ActivationLayerInfo::ActivationFunction::ELU:
        case ActivationLayerInfo::ActivationFunction::SQRT:
        case ActivationLayerInfo::ActivationFunction::GELU:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : 0.00001f);
        case ActivationLayerInfo::ActivationFunction::TANH:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.001f : 0.00001f);
        case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
            return AbsoluteTolerance<float>(data_type == DataType::F16 ? 0.01f : epsilon);
        default:
            return AbsoluteTolerance<float>(epsilon);
    }
}
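
// For example (values taken from the switch above): tolerance(LOGISTIC, F16) yields an
// absolute tolerance of 0.001f, while tolerance(LINEAR, F32) falls back to the 1e-6f
// epsilon used for the near-exact cases.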

/** CNN data types */
const auto CNNDataTypes = framework::dataset::make("DataType",
{
    DataType::F16,
    DataType::F32
});

/** Input data sets: every combination of in-place flag, activation function and alpha/beta value. */
const auto ActivationDataset = combine(combine(framework::dataset::make("InPlace", { false, true }), datasets::ActivationFunctions()), framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));

} // namespace

TEST_SUITE(CL)
TEST_SUITE(ActivationLayer)
// *INDENT-OFF*
// clang-format off
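// Each row across the zipped datasets below is one configuration: an input/output
// TensorInfo pair, the activation to run, and whether CLActivationLayer::validate()
// is expected to accept it. The comments on the input entries note why the rejected
// configurations are invalid.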
DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
               framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching data types
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8), // Invalid quantization info
                                                       TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32), // Mismatching shapes
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16),
                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16), // Invalid activation function for QSYMM16
                                                     }),
               framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F16),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QASYMM8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::QASYMM8),
                                                        TensorInfo(TensorShape(30U, 11U, 2U), 1, DataType::F32),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)),
                                                      })),
               framework::dataset::make("ActivationInfo", { ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC),
                                                            ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SQRT),
                                                          })),
               framework::dataset::make("Expected", { false, true, true, true, false, false, true, true, false })),
               input_info, output_info, act_info, expected)
{
    ARM_COMPUTE_EXPECT(bool(CLActivationLayer::validate(&input_info.clone()->set_is_resizable(false), (output_info.total_size() == 0) ? nullptr : &output_info.clone()->set_is_resizable(false), act_info)) == expected, framework::LogLevel::ERRORS);
}

// clang-format on
// *INDENT-ON*

/** [CLActivationLayerFixture snippet] **/
template <typename T>
using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
/** [CLActivationLayerFixture snippet] **/
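// The fixture runs CLActivationLayer on generated inputs and compares the result
// against a reference implementation; the _target, _reference, _function and
// _data_type names used below are members it provides.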

TEST_SUITE(Float)
TEST_SUITE(FP16)
/** [CLActivationLayer Test snippet] **/
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<half>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                      framework::dataset::make("DataType", DataType::F16)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
/** [CLActivationLayer Test snippet] **/
TEST_SUITE_END() // FP16

TEST_SUITE(FP32)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerFixture<float>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), ActivationDataset),
                                                                                                       framework::dataset::make("DataType", DataType::F32)))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // FP32
TEST_SUITE_END() // Float

template <typename T>
using CLActivationLayerQuantizedFixture = ActivationValidationQuantizedFixture<CLTensor, CLAccessor, CLActivationLayer, T>;
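// Quantized counterpart of the fixture above; the quantized test cases below are
// additionally parameterised on a QuantizationInfo dataset.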

const auto QuantizedActivationDataset8 = combine(combine(framework::dataset::make("InPlace", { false }),
                                                         concat(datasets::ActivationFunctionsQuantized(),
                                                                framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::HARD_SWISH, ActivationLayerInfo::ActivationFunction::LEAKY_RELU }))),
                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));

const auto QuantizedActivationDataset16 = combine(combine(framework::dataset::make("InPlace", { false }),
                                                          datasets::ActivationFunctionsQuantized()),
                                                  framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
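// Note that HARD_SWISH and LEAKY_RELU are appended to the 8-bit dataset only; the
// 16-bit dataset uses the base quantized activation functions unchanged.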

TEST_SUITE(Quantized)
TEST_SUITE(QASYMM8)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<uint8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
                                                                                                                  framework::dataset::make("DataType", DataType::QASYMM8)),
                                                                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8
TEST_SUITE(QASYMM8_SIGNED)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset8),
                                                                                                                 framework::dataset::make("DataType", DataType::QASYMM8_SIGNED)),
                                                                                                         framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 10.0f) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance(_function, _data_type));
}
TEST_SUITE_END() // QASYMM8_SIGNED
TEST_SUITE(QSYMM16)
FIXTURE_DATA_TEST_CASE(RunSmall, CLActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(combine(datasets::SmallShapes(), QuantizedActivationDataset16),
                                                                                                                  framework::dataset::make("DataType", DataType::QSYMM16)),
                                                                                                          framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0) })))
{
    // Validate output
    validate(CLAccessor(_target), _reference, tolerance_qsymm16);
}
TEST_SUITE_END() // QSYMM16
TEST_SUITE_END() // Quantized

TEST_SUITE_END() // ActivationLayer
TEST_SUITE_END() // CL
} // namespace validation
} // namespace test
} // namespace arm_compute