/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE
#define TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE

#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"

#include "tests/framework/Fixture.h"
#include "tests/validation/reference/ActivationLayer.h"

using namespace arm_compute::experimental::dynamic_fusion;

namespace arm_compute
{
namespace test
{
namespace validation
{
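/** Validation fixture for the dynamic fusion CLAMP operator.
 *
 * The target is built through a GpuWorkloadSketch, optionally chaining two identical
 * clamp operators when 'fuse' is set, and is compared against a reference computed
 * with the LU_BOUNDED_RELU activation (CLAMP with alpha/beta swapped).
 */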
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class DynamicFusionClampValidationFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, ClampAttributes attributes, bool fuse, DataType data_type)
    {
        // CLAMP is implemented as LU_BOUNDED_RELU with the alpha and beta variables swapped.
        ActivationLayerInfo act_info{ ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, attributes.max_val(), attributes.min_val() };

        _fuse       = fuse;
        _attributes = attributes;
        _data_type  = data_type;
        _target     = compute_target(shape, attributes);
        _reference  = compute_reference(shape, act_info);
    }

protected:
    std::vector<T> get_boundary_values(T min, T max)
    {
        // This function returns a vector filled with the following values, which cover
        // two partitions derived from equivalence partitioning:
        // * Lower partition: min, min + delta, lower quarter (nominal), center - delta
        // * Upper partition: center, center + delta, upper quarter (nominal), max - delta, max
        const auto delta         = is_data_type_float(_data_type) ? T(0.1f) : T(1);
        const auto center_value  = (min + max) / 2;
        const auto lower_quarter = (min + center_value) / 2;
        const auto upper_quarter = (center_value + max) / 2;

        std::vector<T> boundary_values{};

        // To ensure all the inserted values are within the given range after subtracting/adding delta
        auto insert_values = [&boundary_values, &min, &max](const std::initializer_list<T> &new_values)
        {
            for(auto &v : new_values)
            {
                if(v >= min && v <= max)
                {
                    boundary_values.emplace_back(v);
                }
            }
        };

        insert_values({ min, static_cast<T>(min + delta), static_cast<T>(lower_quarter), static_cast<T>(center_value - delta) });                               // lower partition
        insert_values({ static_cast<T>(center_value), static_cast<T>(center_value + delta), static_cast<T>(upper_quarter), static_cast<T>(max - delta), max }); // upper partition

        return boundary_values;
    }

    template <typename U>
    void fill(U &&tensor)
    {
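        // Fill with deterministic boundary values (min/center/max +/- delta) rather than a
        // random distribution so that the clamp edges are always exercised.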
        float min_bound = 0;
        float max_bound = 0;
        std::tie(min_bound, max_bound) = get_activation_layer_test_bounds<T>(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, _data_type);
        library->fill_static_values(tensor, get_boundary_values(static_cast<T>(min_bound), static_cast<T>(max_bound)));
    }

    TensorType compute_target(const TensorShape &shape, ClampAttributes attributes)
    {
        // Create a new workload sketch
        CLCompileContext   cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext gpu_ctx{ &cl_compile_ctx };
        GpuWorkloadSketch  sketch{ &gpu_ctx };

        // Create sketch tensors
        TensorInfo src_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));
        TensorInfo dst_info = sketch.create_tensor_info(TensorInfo(shape, 1, _data_type));

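        // Build the operator graph: when _fuse is set, two identical clamp operators are
        // chained back-to-back so that fusion within a single sketch is exercised.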
        ITensorInfo *ans_0_info = FunctionType::create_op(sketch, &src_info, attributes);
        if(_fuse)
        {
            ITensorInfo *ans_1_info = FunctionType::create_op(sketch, ans_0_info, attributes);
            GpuOutput::create_op(sketch, ans_1_info, &dst_info);
        }
        else
        {
            GpuOutput::create_op(sketch, ans_0_info, &dst_info);
        }

        // Configure runtime
        ClWorkloadRuntime runtime;
        runtime.configure(sketch);

        // Construct user tensors
        TensorType t_src{};
        TensorType t_dst{};

        // Initialize user tensors
        t_src.allocator()->init(src_info);
        t_dst.allocator()->init(dst_info);

        // Allocate and fill user tensors
        t_src.allocator()->allocate();
        t_dst.allocator()->allocate();

        fill(AccessorType(t_src));

        // Run runtime
        runtime.run({ &t_src, &t_dst });

        return t_dst;
    }

    SimpleTensor<T> compute_reference(const TensorShape &shape, ActivationLayerInfo act_info)
    {
        // Create reference
        SimpleTensor<T> src{ shape, _data_type, 1, _quantization_info };

        // Fill reference
        fill(src);

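        // Clamping is idempotent, so a single application of the activation also covers the
        // fused (clamp + clamp) case; the reference therefore does not depend on _fuse.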
        auto dst = reference::activation_layer<T>(src, act_info, _quantization_info);
        return dst;
    }

protected:
    QuantizationInfo _quantization_info{};
    ClampAttributes  _attributes{};
    bool             _fuse{ false };
    DataType         _data_type{};
    TensorType       _target{};
    SimpleTensor<T>  _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* TESTS_VALIDATION_FIXTURES_DYNAMIC_FUSION_OPERATORS_CLAMPFIXTURE */