/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_SPLIT_FIXTURE
#define ARM_COMPUTE_TEST_SPLIT_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"

#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/Helpers.h"
#include "tests/validation/reference/SliceOperations.h"

#include <algorithm>

namespace arm_compute
{
namespace test
{
namespace validation
{
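/** Fixture that validates splitting a tensor into @p splits equally sized sub-tensors along @p axis.
 *
 * The backend split function is compared against a reference built by repeatedly slicing the
 * input with reference::slice.
 *
 * Typical instantiation (illustrative; the concrete types come from the backend test suite), e.g.:
 *
 *     template <typename T>
 *     using NESplitFixture = SplitFixture<Tensor, ITensor, Accessor, NESplit, T>;
 */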
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
class SplitFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, unsigned int axis, unsigned int splits, DataType data_type)
    {
        _target    = compute_target(shape, axis, splits, data_type);
        _reference = compute_reference(shape, axis, splits, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        library->fill_tensor_uniform(tensor, i);
    }

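    // Target: configure the backend split function with one output per split, allocate all tensors,
    // fill the input and run, returning the computed sub-tensors.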
    std::vector<TensorType> compute_target(const TensorShape &shape, unsigned int axis, unsigned int splits, DataType data_type)
    {
        // Create tensors
        TensorType                 src = create_tensor<TensorType>(shape, data_type);
        std::vector<TensorType>    dsts(splits);
        std::vector<ITensorType *> dsts_ptr;
        for(auto &dst : dsts)
        {
            dsts_ptr.emplace_back(&dst);
        }

        // Create and configure function
        FunctionType split;
        split.configure(&src, dsts_ptr, axis);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
        {
            return t.info()->is_resizable();
        }),
        framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        for(unsigned int i = 0; i < splits; ++i)
        {
            dsts[i].allocator()->allocate();
        }

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
        {
            return !t.info()->is_resizable();
        }),
        framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);

        // Compute function
        split.run();

        return dsts;
    }

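    // Reference: slice the input into `splits` consecutive windows of shape[axis] / splits elements
    // along the split axis; end coordinates default to -1 so every other dimension keeps its full extent.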
    std::vector<SimpleTensor<T>> compute_reference(const TensorShape &shape, unsigned int axis, unsigned int splits, DataType data_type)
    {
        // Create reference
        SimpleTensor<T>              src{ shape, data_type };
        std::vector<SimpleTensor<T>> dsts;

        // Fill reference
        fill(src, 0);

        // Calculate slice for each split
        const size_t axis_split_step = shape[axis] / splits;
        unsigned int axis_offset     = 0;

        // Start/End coordinates
        Coordinates start_coords;
        Coordinates end_coords;
        for(unsigned int d = 0; d < shape.num_dimensions(); ++d)
        {
            end_coords.set(d, -1);
        }

        for(unsigned int i = 0; i < splits; ++i)
        {
            // Update coordinate on axis
            start_coords.set(axis, axis_offset);
            end_coords.set(axis, axis_offset + axis_split_step);

            dsts.emplace_back(reference::slice(src, start_coords, end_coords));

            axis_offset += axis_split_step;
        }

        return dsts;
    }

    std::vector<TensorType>      _target{};
    std::vector<SimpleTensor<T>> _reference{};
};

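/** Fixture that validates splitting a tensor along @p axis into sub-tensors with explicitly provided
 *  shapes, which allows unevenly sized splits.
 *
 * As in SplitFixture, the backend function is compared against reference::slice applied over
 * consecutive windows of the input.
 */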
template <typename TensorType, typename ITensorType, typename AccessorType, typename FunctionType, typename T>
class SplitShapesFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
    {
        _target    = compute_target(shape, axis, split_shapes, data_type);
        _reference = compute_reference(shape, axis, split_shapes, data_type);
    }

protected:
    template <typename U>
    void fill(U &&tensor, int i)
    {
        library->fill_tensor_uniform(tensor, i);
    }

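    // Target: same flow as SplitFixture::compute_target, except that each output tensor is created
    // up front with its expected shape taken from split_shapes.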
    std::vector<TensorType> compute_target(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
    {
        // Create tensors
        TensorType                 src = create_tensor<TensorType>(shape, data_type);
        std::vector<TensorType>    dsts{};
        std::vector<ITensorType *> dsts_ptr;

        for(const auto &split_shape : split_shapes)
        {
            TensorType dst = create_tensor<TensorType>(split_shape, data_type);
            dsts.push_back(std::move(dst));
        }

        for(auto &dst : dsts)
        {
            dsts_ptr.emplace_back(&dst);
        }

        // Create and configure function
        FunctionType split;
        split.configure(&src, dsts_ptr, axis);

        ARM_COMPUTE_ASSERT(src.info()->is_resizable());
        ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
        {
            return t.info()->is_resizable();
        }),
        framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        for(unsigned int i = 0; i < dsts.size(); ++i)
        {
            dsts[i].allocator()->allocate();
        }

        ARM_COMPUTE_ASSERT(!src.info()->is_resizable());
        ARM_COMPUTE_EXPECT(std::all_of(dsts.cbegin(), dsts.cend(), [](const TensorType & t)
        {
            return !t.info()->is_resizable();
        }),
        framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);

        // Compute function
        split.run();

        return dsts;
    }

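    // Reference: as in SplitFixture::compute_reference, but the extent of each slice along the split
    // axis is taken from the corresponding entry of split_shapes.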
    std::vector<SimpleTensor<T>> compute_reference(TensorShape shape, unsigned int axis, std::vector<TensorShape> split_shapes, DataType data_type)
    {
        // Create reference
        SimpleTensor<T>              src{ shape, data_type };
        std::vector<SimpleTensor<T>> dsts;

        // Fill reference
        fill(src, 0);

        unsigned int axis_offset{ 0 };
        for(const auto &split_shape : split_shapes)
        {
            // Calculate slice for each split
            const size_t axis_split_step = split_shape[axis];

            // Start/End coordinates
            Coordinates start_coords;
            Coordinates end_coords;
            for(unsigned int d = 0; d < shape.num_dimensions(); ++d)
            {
                end_coords.set(d, -1);
            }

            // Update coordinate on axis
            start_coords.set(axis, axis_offset);
            end_coords.set(axis, axis_offset + axis_split_step);

            dsts.emplace_back(reference::slice(src, start_coords, end_coords));

            axis_offset += axis_split_step;
        }

        return dsts;
    }

    std::vector<TensorType>      _target{};
    std::vector<SimpleTensor<T>> _reference{};
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_SPLIT_FIXTURE */