1 //
2 // Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "ReductionTestImpl.hpp"
7
8 #include <DataTypeUtils.hpp>
9 #include <armnnTestUtils/TensorCopyUtils.hpp>
10 #include <armnnTestUtils/WorkloadTestUtils.hpp>
11
12 #include <armnnTestUtils/TensorHelpers.hpp>
13
14 #include <iostream>
15
16 namespace
17 {
18
19 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
ReductionTestCommon(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory,const armnn::TensorInfo inputTensorInfo,const armnn::TensorInfo outputTensorInfo,const std::vector<float> & inputData,const std::vector<float> & outputData,const std::vector<int32_t> vAxis,const armnn::ReduceOperation reduceOperation,bool keepDims=false)20 LayerTestResult<float, 4> ReductionTestCommon(
21 armnn::IWorkloadFactory& workloadFactory,
22 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
23 const armnn::ITensorHandleFactory& tensorHandleFactory,
24 const armnn::TensorInfo inputTensorInfo,
25 const armnn::TensorInfo outputTensorInfo,
26 const std::vector<float>& inputData,
27 const std::vector<float>& outputData,
28 const std::vector<int32_t> vAxis,
29 const armnn::ReduceOperation reduceOperation,
30 bool keepDims = false)
31 {
32 IgnoreUnused(memoryManager);
33 auto inputTensor = ConvertToDataType<ArmnnType>(inputData, inputTensorInfo);
34
35 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
36
37 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
38 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
39
40 armnn::ReduceQueueDescriptor descriptor;
41 std::vector<uint32_t> updated_idx;
42 uint32_t resolvedAxis = 0;
43 for (uint32_t i = 0; i < vAxis.size(); ++i)
44 {
45 if (vAxis[i] < 0)
46 {
47 resolvedAxis = inputTensorInfo.GetNumDimensions() + static_cast<uint32_t>(vAxis[i]);
48 } else
49 {
50 resolvedAxis = static_cast<uint32_t>(vAxis[i]);
51 }
52
53 updated_idx.push_back(resolvedAxis);
54 }
55
56 descriptor.m_Parameters.m_vAxis = updated_idx;
57 descriptor.m_Parameters.m_ReduceOperation = reduceOperation;
58 descriptor.m_Parameters.m_KeepDims = keepDims;
59 armnn::WorkloadInfo info;
60
61 AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
62 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
63
64 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Reduce,
65 descriptor,
66 info);
67
68 inputHandle->Allocate();
69 outputHandle->Allocate();
70
71 CopyDataToITensorHandle(inputHandle.get(), inputTensor.data());
72
73 workload->Execute();
74
75 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
76
77 return LayerTestResult<float, 4>(actualOutput,
78 outputData,
79 outputHandle->GetShape(),
80 outputTensorInfo.GetShape());
81 }
82
83 } // namespace
84
85 template<armnn::DataType ArmnnType, typename T>
ReduceMaxSimpleTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)86 LayerTestResult<float, 4> ReduceMaxSimpleTest(
87 armnn::IWorkloadFactory& workloadFactory,
88 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
89 const armnn::ITensorHandleFactory& tensorHandleFactory)
90 {
91 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
92 const armnn::TensorShape outputShape{ 1, 1, 1, 3};
93
94 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
95
96 if (armnn::IsQuantizedType<T>())
97 {
98 inputTensorInfo.SetQuantizationScale(1.0f);
99 inputTensorInfo.SetQuantizationOffset(0);
100 }
101
102 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
103
104 std::vector<float> inputValues
105 ({
106 1001.0f, 11.0f, 1003.0f,
107 10.0f, 1002.0f, 12.0f
108 });
109 std::vector<float> outputValues
110 ({
111 1001.0f, 1002.0f, 1003.0f
112 });
113
114 return ReductionTestCommon<ArmnnType>(workloadFactory,
115 memoryManager,
116 tensorHandleFactory,
117 inputTensorInfo,
118 outputTensorInfo,
119 inputValues,
120 outputValues,
121 { 2 },
122 armnn::ReduceOperation::Max);
123 }
124
125 template<armnn::DataType ArmnnType, typename T>
ReduceMaxNegativeAxisTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)126 LayerTestResult<float, 4> ReduceMaxNegativeAxisTest(
127 armnn::IWorkloadFactory& workloadFactory,
128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
129 const armnn::ITensorHandleFactory& tensorHandleFactory)
130 {
131 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
132 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
133
134 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
135
136 if (armnn::IsQuantizedType<T>())
137 {
138 inputTensorInfo.SetQuantizationScale(1.0f);
139 inputTensorInfo.SetQuantizationOffset(0);
140 }
141
142 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
143
144 std::vector<float> inputValues
145 ({
146 1001.0f, 11.0f, 1003.0f,
147 10.0f, 1002.0f, 12.0f
148 });
149 std::vector<float> outputValues
150 ({
151 1003.0f, 1002.0f
152 });
153
154 return ReductionTestCommon<ArmnnType>(workloadFactory,
155 memoryManager,
156 tensorHandleFactory,
157 inputTensorInfo,
158 outputTensorInfo,
159 inputValues,
160 outputValues,
161 { -1 },
162 armnn::ReduceOperation::Max,
163 true);
164 }
165
166 template<armnn::DataType ArmnnType, typename T>
ReduceMaxSimpleTest2(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)167 LayerTestResult<float, 4> ReduceMaxSimpleTest2(
168 armnn::IWorkloadFactory& workloadFactory,
169 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
170 const armnn::ITensorHandleFactory& tensorHandleFactory)
171 {
172 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
173 const armnn::TensorShape outputShape{ 1, 1, 2, 1 };
174
175 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
176
177 if (armnn::IsQuantizedType<T>())
178 {
179 inputTensorInfo.SetQuantizationScale(1.0f);
180 inputTensorInfo.SetQuantizationOffset(0);
181 }
182
183 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
184
185 std::vector<float> inputValues
186 ({
187 1.0f, 3.0f, 2.0f,
188 6.0f, 4.0f, 5.0f
189 });
190
191 std::vector<float> outputValues
192 ({
193 3.0f, 6.0f
194 });
195
196 return ReductionTestCommon<ArmnnType>(workloadFactory,
197 memoryManager,
198 tensorHandleFactory,
199 inputTensorInfo,
200 outputTensorInfo,
201 inputValues,
202 outputValues,
203 { 3 },
204 armnn::ReduceOperation::Max,
205 true);
206 }
207
208 template<armnn::DataType ArmnnType, typename T>
ReduceMinSimpleTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)209 LayerTestResult<float, 4> ReduceMinSimpleTest(
210 armnn::IWorkloadFactory& workloadFactory,
211 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
212 const armnn::ITensorHandleFactory& tensorHandleFactory)
213 {
214 const armnn::TensorShape inputShape { 1, 1, 2, 3 };
215 const armnn::TensorShape outputShape { 1, 1, 1, 3};
216
217 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
218
219 if (armnn::IsQuantizedType<T>())
220 {
221 inputTensorInfo.SetQuantizationScale(1.0f);
222 inputTensorInfo.SetQuantizationOffset(0);
223 }
224
225 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
226
227 std::vector<float> inputValues
228 ({
229 1001.0f, 11.0f, 1003.0f,
230 10.0f, 1002.0f, 12.0f
231 });
232 std::vector<float> outputValues
233 ({
234 10.0f, 11.0f, 12.0f
235 });
236
237 return ReductionTestCommon<ArmnnType>(workloadFactory,
238 memoryManager,
239 tensorHandleFactory,
240 inputTensorInfo,
241 outputTensorInfo,
242 inputValues,
243 outputValues,
244 { 2 },
245 armnn::ReduceOperation::Min);
246 }
247
248 template<armnn::DataType ArmnnType, typename T>
ReduceMinNegativeAxisTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)249 LayerTestResult<float, 4> ReduceMinNegativeAxisTest(
250 armnn::IWorkloadFactory& workloadFactory,
251 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
252 const armnn::ITensorHandleFactory& tensorHandleFactory)
253 {
254 const armnn::TensorShape inputShape{ 1, 1, 2, 3 };
255 const armnn::TensorShape outputShape{ 1, 1, 2, 1};
256
257 armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
258
259 if (armnn::IsQuantizedType<T>())
260 {
261 inputTensorInfo.SetQuantizationScale(1.0f);
262 inputTensorInfo.SetQuantizationOffset(0);
263 }
264
265 armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32);
266
267 std::vector<float> inputValues
268 ({
269 1001.0f, 11.0f, 1003.0f,
270 10.0f, 1002.0f, 12.0f
271 });
272 std::vector<float> outputValues
273 ({
274 11.0f, 10.0f
275 });
276
277 return ReductionTestCommon<ArmnnType>(workloadFactory,
278 memoryManager,
279 tensorHandleFactory,
280 inputTensorInfo,
281 outputTensorInfo,
282 inputValues,
283 outputValues,
284 { -1 },
285 armnn::ReduceOperation::Min,
286 true);
287 }
288
// Explicit template instantiations for Float32, so the definitions above are
// emitted in this translation unit and linkable from the backend test suites.
template LayerTestResult<float, 4>
ReduceMaxSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMaxNegativeAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMaxSimpleTest2<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMinSimpleTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);

template LayerTestResult<float, 4>
ReduceMinNegativeAxisTest<armnn::DataType::Float32>(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory);
319
320