/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/framework/ParametersLibrary.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/ActivationLayer.h"

#include <random>

namespace arm_compute
{
namespace test
{
namespace validation
{
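/** Generic fixture for validating activation layers.
 *
 * Runs the activation function under test on the backend (the "target") and on the
 * reference implementation, storing both results so the test case can compare them.
 */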
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ActivationValidationGenericFixture : public framework::Fixture
{
public:
    ActivationValidationGenericFixture()
        : _target(parameters->get_ctx<TensorType>())
    {
    }

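    /** Set up the test case.
     *
     * Both alpha and beta of the ActivationLayerInfo are set to @p alpha_beta. When running
     * in place, the input inherits the output quantization info since input and output
     * share the same buffer.
     */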
    template <typename...>
    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
    {
        ActivationLayerInfo info(function, alpha_beta, alpha_beta);

        _in_place                 = in_place;
        _data_type                = data_type;
        _output_quantization_info = calculate_output_quantization_info(_data_type, info, quantization_info);
        _input_quantization_info  = in_place ? _output_quantization_info : quantization_info;

        _function  = function;
        _target    = compute_target(shape, info);
        _reference = compute_reference(shape, info);
    }

protected:
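    /** Return boundary test values in the range [min, max], chosen by equivalence partitioning. */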
    std::vector<T> get_boundary_values(T min, T max)
    {
        // This function returns a vector filled with the following values, which
        // represent two partitions derived from equivalence partitioning:
        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
        const auto delta         = is_data_type_float(_data_type) ? T(0.1f) : T(1);
        const auto center_value  = (min + max) / 2;
        const auto lower_quarter = (min + center_value) / 2;
        const auto upper_quarter = (center_value + max) / 2;

        std::vector<T> boundary_values{};

        // Ensure all the inserted values are within the given range after subtracting/adding delta
        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
        {
            for(auto &v : new_values)
            {
                if(v >= min && v <= max)
                {
                    boundary_values.emplace_back(v);
                }
            }
        };

        insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) });                               // lower partition
        insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition

        return boundary_values;
    }

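    /** Fill @p tensor with boundary values: for float data types the bounds come from
     *  get_activation_layer_test_bounds(), otherwise the full range of the data type is used.
     */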
    template <typename U>
    void fill(U &&tensor)
    {
        if(is_data_type_float(_data_type))
        {
            float min_bound = 0;
            float max_bound = 0;
            std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(_function, _data_type);
            library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
        }
        else
        {
            PixelValue min{};
            PixelValue max{};
            std::tie(min, max) = get_min_max(tensor.data_type());
            library->fill_static_values(tensor, get_boundary_values(min.get<T>(), max.get<T>()));
        }
    }

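    /** Run the activation layer on the backend under test and return the result.
     *
     * When running in place the function is configured with a null destination and the
     * source tensor doubles as the output.
     */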
    TensorType compute_target(const TensorShape &shape, ActivationLayerInfo info)
    {
        auto ctx = parameters->get_ctx<TensorType>();
        // Create tensors
        TensorType src = create_tensor<TensorType>(shape, _data_type, 1, _input_quantization_info, DataLayout::NCHW, ctx);
        TensorType dst = create_tensor<TensorType>(shape, _data_type, 1, _output_quantization_info, DataLayout::NCHW, ctx);

        // Create and configure function
        FunctionType act_layer(ctx);

        TensorType *dst_ptr = _in_place ? nullptr : &dst;

        act_layer.configure(&src, dst_ptr, info);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());

        if(!_in_place)
        {
            dst.allocator()->allocate();
            ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
        }

        // Fill tensors
        fill(AccessorType(src));

        // Compute function
        act_layer.run();

        if(_in_place)
        {
            return src;
        }
        else
        {
            return dst;
        }
    }

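    /** Compute the expected result using the reference implementation. */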
    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo info)
    {
        // Create reference
        SimpleTensor<T> src{ shape, _data_type, 1, _input_quantization_info };

        // Fill reference
        fill(src);

        return reference::activation_layer<T>(src, info, _output_quantization_info);
    }

private:
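    /** Select the output quantization info.
     *
     * TANH and LOGISTIC have fixed output ranges ([-1, 1] and [0, 1] respectively), so for
     * quantized data types the output quantization is chosen to cover that interval; every
     * other case keeps @p default_qinfo.
     */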
    QuantizationInfo calculate_output_quantization_info(DataType dt, const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
    {
        auto qasymm8_max        = float(std::numeric_limits<uint8_t>::max()) + 1.f;
        auto qasymm8_signed_max = float(std::numeric_limits<int8_t>::max()) + 1.f;
        auto qsymm16_max        = float(std::numeric_limits<int16_t>::max()) + 1.f;

        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::TANH:
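                // tanh saturates to [-1, 1]; pick an output quantization that covers that interval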
                if(dt == DataType::QSYMM16)
                {
                    return QuantizationInfo(1.f / qsymm16_max, 0);
                }
                else if(dt == DataType::QASYMM8)
                {
                    return QuantizationInfo(1.f / (0.5 * qasymm8_max), int(0.5 * qasymm8_max));
                }
                else if(dt == DataType::QASYMM8_SIGNED)
                {
                    return QuantizationInfo(1.f / qasymm8_signed_max, 0);
                }
                else
                {
                    return default_qinfo;
                }
            case ActivationLayerInfo::ActivationFunction::LOGISTIC:
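                // logistic saturates to [0, 1]; pick an output quantization that covers that interval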
                if(dt == DataType::QSYMM16)
                {
                    return QuantizationInfo(1.f / qsymm16_max, 0);
                }
                else if(dt == DataType::QASYMM8)
                {
                    return QuantizationInfo(1.f / qasymm8_max, 0);
                }
                else if(dt == DataType::QASYMM8_SIGNED)
                {
                    return QuantizationInfo(1.f / (2.f * qasymm8_signed_max), -int(qasymm8_signed_max));
                }
                else
                {
                    return default_qinfo;
                }
            default:
                return default_qinfo;
        }
    }

protected:
    TensorType                              _target{};
    SimpleTensor<T>                         _reference{};
    bool                                    _in_place{};
    QuantizationInfo                        _input_quantization_info{};
    QuantizationInfo                        _output_quantization_info{};
    DataType                                _data_type{};
    ActivationLayerInfo::ActivationFunction _function{};
};

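/** Fixture for non-quantized data types; forwards to the generic fixture with an empty QuantizationInfo. */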
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ActivationValidationFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type)
    {
        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, QuantizationInfo());
    }
};

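/** Fixture for quantized data types; forwards the caller-supplied QuantizationInfo to the generic fixture. */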
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ActivationValidationQuantizedFixture : public ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape shape, bool in_place, ActivationLayerInfo::ActivationFunction function, float alpha_beta, DataType data_type, QuantizationInfo quantization_info)
    {
        ActivationValidationGenericFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, in_place, function, alpha_beta, data_type, quantization_info);
    }
};
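
// A minimal usage sketch, under assumed backend names (CLTensor, CLAccessor and
// CLActivationLayer are illustrative, not defined in this header): a backend test suite
// typically aliases a fixture with its own tensor, accessor and function types and
// instantiates it through the test framework's data-test-case macros.
//
//   template <typename T>
//   using CLActivationLayerFixture = ActivationValidationFixture<CLTensor, CLAccessor, CLActivationLayer, T>;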

} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_ACTIVATION_LAYER_FIXTURE */