//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConversionUtils.hpp"

#include <armnnUtils/Permute.hpp>

#include <utility>
8
///
/// Helper classes
///

13 namespace armnn_driver
14 {
15
LayerInputHandle()16 LayerInputHandle::LayerInputHandle()
17 : m_OutputSlot(nullptr)
18 , m_Valid(false)
19 {}
20
LayerInputHandle(bool valid,armnn::IOutputSlot * outputSlot,armnn::TensorInfo tensorInfo)21 LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
22 : m_OutputSlot(outputSlot)
23 , m_Valid(valid)
24 , m_TensorInfo(tensorInfo)
25 {}
26
IsValid() const27 bool LayerInputHandle::IsValid() const
28 {
29 return m_Valid;
30 }
31
Connect(armnn::IInputSlot & inputSlot)32 void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
33 {
34 if (!IsValid())
35 {
36 throw armnn::RuntimeException("LayerInputHandle is invalid");
37 }
38
39 if (m_OutputSlot)
40 {
41 m_OutputSlot->Connect(inputSlot);
42 }
43 }
44
Disconnect(armnn::IInputSlot & inputSlot)45 void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
46 {
47 if (!IsValid())
48 {
49 throw armnn::RuntimeException("LayerInputHandle is invalid");
50 }
51 if (m_OutputSlot)
52 {
53 m_OutputSlot->Disconnect(inputSlot);
54 }
55 }
56
GetTensorInfo() const57 const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
58 {
59 return m_TensorInfo;
60 }
61
SanitizeQuantizationScale(LayerInputHandle & weight,LayerInputHandle & input)62 void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight,
63 LayerInputHandle& input)
64 {
65 if (m_OutputSlot)
66 {
67 armnn::TensorInfo weightInfo = weight.GetTensorInfo();
68 armnn::TensorInfo inputInfo = input.GetTensorInfo();
69 armnn::TensorInfo biasInfo = GetTensorInfo();
70
71 SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);
72
73 m_TensorInfo = biasInfo;
74 m_OutputSlot->SetTensorInfo(biasInfo);
75 }
76 }
77
ConstTensorPin(bool optional)78 ConstTensorPin::ConstTensorPin(bool optional)
79 : m_Optional(optional)
80 {}
81
ConstTensorPin(armnn::TensorInfo & tensorInfo,const void * valueStart,uint32_t numBytes,const armnn::PermutationVector & mappings)82 ConstTensorPin::ConstTensorPin(armnn::TensorInfo& tensorInfo,
83 const void* valueStart,
84 uint32_t numBytes,
85 const armnn::PermutationVector& mappings)
86 : m_Optional(false)
87 {
88 armnn::IgnoreUnused(numBytes);
89 if (tensorInfo.GetNumBytes() != numBytes)
90 {
91 ALOGW("The size of ConstTensor does not match its TensorInfo.");
92 }
93
94 const bool needsSwizzling = (mappings.GetSize() > 0);
95 if (needsSwizzling)
96 {
97 m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
98 SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);
99
100 m_ConstTensor = armnn::ConstTensor(tensorInfo, m_SwizzledTensorData.data());
101 }
102 else
103 {
104 m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
105 }
106 }
107
IsValid() const108 bool ConstTensorPin::IsValid() const
109 {
110 return m_ConstTensor.GetMemoryArea() != nullptr;
111 }
112
IsOptional() const113 bool ConstTensorPin::IsOptional() const
114 {
115 return m_Optional;
116 }
117
GetConstTensor() const118 const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
119 {
120 return m_ConstTensor;
121 }
122
GetConstTensorPtr() const123 const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
124 {
125 if (IsValid() && m_ConstTensor.GetNumElements() > 0)
126 {
127 return &m_ConstTensor;
128 }
129 // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
130 return nullptr;
131 }
132
///
/// Utility functions
///
136
// Fuses an Android NN activation function onto prevLayer's single output.
//
// Sets tensorInfo on prevLayer's output slot; when `activation` is
// kActivationNone the previous layer is returned unchanged. Otherwise an
// ArmNN activation layer is created (if the backends support it), connected
// after prevLayer, and returned.
//
// Returns nullptr on failure: wrong output-slot count, unknown activation
// enum, or activation unsupported by all backends.
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data)
{
    if (prevLayer->GetNumOutputSlots() != 1)
    {
        Fail("%s: Incorrect Number of OutputSlots expected 1 was %i", __func__, prevLayer->GetNumOutputSlots());
        return nullptr;
    }

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    if (activation == ActivationFn::kActivationNone)
    {
        // Nothing to fuse; callers use the previous layer's output directly.
        return prevLayer;
    }

    armnn::ActivationDescriptor desc;
    switch (activation)
    {
        case ActivationFn::kActivationRelu:
            desc.m_Function = armnn::ActivationFunction::ReLu;
            break;
        case ActivationFn::kActivationRelu1:
            // Clamp to [-1, 1].
            desc.m_Function = armnn::ActivationFunction::BoundedReLu;
            desc.m_A = 1.0f;
            desc.m_B = -1.0f;
            break;
        case ActivationFn::kActivationRelu6:
            // Clamp to [0, 6]; m_B keeps its default lower bound.
            desc.m_Function = armnn::ActivationFunction::BoundedReLu;
            desc.m_A = 6.0f;
            break;
        case ActivationFn::kActivationSigmoid:
            desc.m_Function = armnn::ActivationFunction::Sigmoid;
            break;
        case ActivationFn::kActivationTanh:
            desc.m_Function = armnn::ActivationFunction::TanH;
            desc.m_A = 1.0f;
            desc.m_B = 1.0f;
            break;
        default:
            Fail("%s: Invalid activation enum value %i", __func__, activation);
            return nullptr;
    }

    // Ask the configured backends whether this activation is supported
    // before adding it to the network.
    bool isSupported = false;
    armnn::BackendId setBackend;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               setBackend,
                               prevLayer->GetOutputSlot(0).GetTensorInfo(),
                               tensorInfo,
                               desc);
    if (!isSupported)
    {
        return nullptr;
    }

    armnn::IConnectableLayer* activationLayer = data.m_Network->AddActivationLayer(desc);
    activationLayer->SetBackendId(setBackend);

    prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
    activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    return activationLayer;
}
217
218 } // namespace armnn_driver
219