/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE
#define ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/DepthConvertLayer.h"

#include <random>
#include <utility>

namespace arm_compute
{
namespace test
{
namespace validation
{
/* This fixture ignores the scale and zeroPoint of quantized tensors, i.e. QASYMM8 input is treated as uint8 values. */
template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class DepthConvertLayerValidationBaseFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info)
    {
        _shift             = shift;
        _quantization_info = quantization_info;
        _target            = compute_target(shape, dt_in, dt_out, policy, shift);
        _reference         = compute_reference(shape, dt_in, dt_out, policy, shift);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i, DataType dt_in, DataType dt_out)
    {
        if(is_data_type_quantized(tensor.data_type()))
        {
            std::pair<int, int> bounds = get_quantized_bounds(tensor.quantization_info(), -1.0f, 1.0f);
            std::uniform_int_distribution<uint32_t> distribution(bounds.first, bounds.second);

            library->fill(tensor, distribution, i);
        }
        else
        {
            // When converting S32 to F16, both the reference and the Compute Library implementation
            // produce +/- infinity outside the F16 range, so inputs are restricted to the largest
            // finite F16 magnitude (65504).
            if(dt_in == DataType::S32 && dt_out == DataType::F16)
            {
                std::uniform_int_distribution<int32_t> distribution_s32(-65504, 65504);
                library->fill(tensor, distribution_s32, i);
            }
            else
            {
                library->fill_tensor_uniform(tensor, i);
            }
        }
    }

    TensorType compute_target(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
    {
        // Create tensors
        TensorType src = create_tensor<TensorType>(shape, dt_in, 1, _quantization_info);
        TensorType dst = create_tensor<TensorType>(shape, dt_out, 1, _quantization_info);

        // Create and configure function
        FunctionType depth_convert;
        depth_convert.configure(&src, &dst, policy, shift);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());

        // Allocate tensors
        src.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());

        // Fill tensors
        fill(AccessorType(src), 0, dt_in, dt_out);

        // Compute function
        depth_convert.run();

        return dst;
    }

    SimpleTensor<T2> compute_reference(const TensorShape &shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
    {
        // Create reference
        SimpleTensor<T1> src{ shape, dt_in, 1, _quantization_info };

        // Fill reference
        fill(src, 0, dt_in, dt_out);

        return reference::depth_convert<T1, T2>(src, dt_out, policy, shift);
    }

    TensorType       _target{};
    SimpleTensor<T2> _reference{};
    int              _shift{};
    QuantizationInfo _quantization_info{};
};
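
// The element types T1 and T2 must correspond to dt_in and dt_out, since the reference output is
// computed by reference::depth_convert<T1, T2>; for example, a U8 -> U16 conversion is exercised
// with T1 = uint8_t and T2 = uint16_t.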

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class DepthConvertLayerValidationFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift)
    {
        DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy,
                                                                                                      shift, QuantizationInfo());
    }
};

template <typename TensorType, typename AccessorType, typename FunctionType, typename T1, typename T2>
class DepthConvertLayerValidationQuantizedFixture : public DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>
{
public:
    template <typename...>
    void setup(TensorShape shape, DataType dt_in, DataType dt_out, ConvertPolicy policy, uint32_t shift, QuantizationInfo quantization_info)
    {
        DepthConvertLayerValidationBaseFixture<TensorType, AccessorType, FunctionType, T1, T2>::setup(shape, dt_in, dt_out, policy,
                                                                                                      shift, quantization_info);
    }
};
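
// A minimal usage sketch, assuming the Neon backend (Tensor, Accessor, NEDepthConvertLayer) and the
// validation framework's FIXTURE_DATA_TEST_CASE macro; the alias name and the dataset below are
// illustrative only and live in the backend-specific test files, not in this header:
//
//   template <typename T>
//   using NEDepthConvertLayerToU16Fixture = DepthConvertLayerValidationFixture<Tensor, Accessor, NEDepthConvertLayer, T, uint16_t>;
//
//   FIXTURE_DATA_TEST_CASE(RunSmall, NEDepthConvertLayerToU16Fixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
//                          combine(combine(combine(combine(datasets::SmallShapes(),
//                                                          framework::dataset::make("DataType", DataType::U8)),
//                                                  framework::dataset::make("DataType", DataType::U16)),
//                                          framework::dataset::make("ConvertPolicy", { ConvertPolicy::SATURATE, ConvertPolicy::WRAP })),
//                                  framework::dataset::make("Shift", 0)))
//   {
//       // Validate the target output against the reference computed by the fixture
//       validate(Accessor(_target), _reference);
//   }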
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_DEPTH_CONVERT_FIXTURE */