1 //
2 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "ConstantTestImpl.hpp"
7
8 #include <armnnUtils/QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
10
11
12 #include <armnnUtils/Permute.hpp>
13
14 #include <armnn/backends/TensorHandle.hpp>
15
16 #include <armnnTestUtils/TensorCopyUtils.hpp>
17 #include <armnnTestUtils/WorkloadTestUtils.hpp>
18
19 #include <armnnTestUtils/TensorHelpers.hpp>
20
21 namespace
22 {
23
// Runs a Constant-layer workload end to end: a fixed 2x3x4x3 (NCHW) tensor is
// baked into the workload as its constant output, the workload is executed,
// and the result is returned alongside the original data so the caller can
// verify the output is a faithful copy.
//
// ArmnnType selects the tensor data type; T resolves to the matching C++ type.
// qScale/qOffset are the quantization scale and zero-point used both to
// quantize the reference data and to describe the tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> ConstantTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    float qScale,
    int32_t qOffset)
{
    IgnoreUnused(memoryManager); // Handles are created via tensorHandleFactory instead.

    // Tensor dimensions in NCHW order. A Constant layer simply emits its
    // stored tensor, so output dimensions mirror the input dimensions exactly.
    constexpr unsigned int inputWidth = 3;
    constexpr unsigned int inputHeight = 4;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputBatchSize = 2;

    constexpr unsigned int outputWidth = inputWidth;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputBatchSize = inputBatchSize;

    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
                                      ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
                                       ArmnnType, qScale, qOffset);

    // Set quantization parameters if the requested type is a quantized type.
    // (The TensorInfo constructors above already received qScale/qOffset; this
    // re-applies them explicitly for quantized types.)
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
    }

    // Reference data, quantized to T with the given scale/offset. These values
    // serve both as the layer's constant payload and as the expected output.
    auto input = armnnUtils::QuantizedVector<T>(
        {
        // Batch 0, Channel 0
        235.0f,  46.0f, 178.0f,
        100.0f, 123.0f,  19.0f,
        172.0f,  74.0f, 250.0f,
          6.0f, 195.0f,  80.0f,

        // Batch 0, Channel 1
        113.0f,  95.0f, 202.0f,
         77.0f, 114.0f,  71.0f,
        122.0f, 246.0f, 166.0f,
         82.0f,  28.0f,  37.0f,

        // Batch 0, Channel 2
         56.0f, 170.0f, 162.0f,
        194.0f,  89.0f, 254.0f,
         12.0f, 209.0f, 200.0f,
          1.0f,  64.0f,  54.0f,

        // Batch 1, Channel 0
         67.0f,  90.0f,  49.0f,
          7.0f, 163.0f,  18.0f,
         25.0f, 117.0f, 103.0f,
        247.0f,  59.0f, 189.0f,

        // Batch 1, Channel 1
        239.0f, 104.0f, 199.0f,
         17.0f, 124.0f, 153.0f,
        222.0f, 217.0f,  75.0f,
         32.0f, 126.0f,  21.0f,

        // Batch 1, Channel 2
         97.0f, 145.0f, 215.0f,
        115.0f, 116.0f, 238.0f,
        226.0f,  16.0f, 132.0f,
         92.0f, 125.0f,  88.0f,
        },
        qScale, qOffset);

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    // The constant payload lives in a ScopedTensorHandle owned by this
    // function; the queue descriptor only stores a non-owning pointer to it,
    // so constantTensor must outlive the workload execution below.
    armnn::ScopedTensorHandle constantTensor(inputTensorInfo);
    AllocateAndCopyDataToITensorHandle(&constantTensor, input.data());

    armnn::ConstantQueueDescriptor descriptor;
    descriptor.m_LayerOutput = &constantTensor;

    // Constant layers have no inputs: only the output is registered.
    armnn::WorkloadInfo info;
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Constant,
                                                                                descriptor,
                                                                                info);

    // Allocation must happen before PostAllocationConfigure/Execute.
    outputHandle->Allocate();

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    // Expected output is the constant input itself.
    return LayerTestResult<T, 4>(actualOutput,
                                 input,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}
127
128 } // anonymous namespace
129
ConstantTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)130 LayerTestResult<float, 4> ConstantTest(
131 armnn::IWorkloadFactory& workloadFactory,
132 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
133 const armnn::ITensorHandleFactory& tensorHandleFactory)
134 {
135 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, 0.0f, 0);
136 }
137
ConstantInt16SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)138 LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
139 armnn::IWorkloadFactory& workloadFactory,
140 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
141 const armnn::ITensorHandleFactory& tensorHandleFactory)
142 {
143 return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
144 }
145
ConstantUint8SimpleQuantizationScaleNoOffsetTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)146 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
147 armnn::IWorkloadFactory& workloadFactory,
148 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
149 const armnn::ITensorHandleFactory& tensorHandleFactory)
150 {
151 return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 1.0f, 0);
152 }
153
ConstantUint8CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)154 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
155 armnn::IWorkloadFactory& workloadFactory,
156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
157 const armnn::ITensorHandleFactory& tensorHandleFactory)
158 {
159 return ConstantTestImpl<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
160 }
161
ConstantInt16CustomQuantizationScaleAndOffsetTest(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)162 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
163 armnn::IWorkloadFactory& workloadFactory,
164 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
165 const armnn::ITensorHandleFactory& tensorHandleFactory)
166 {
167 return ConstantTestImpl<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory, 2e-6f, 1);
168 }
169