//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ModelToINetworkConverter.hpp"
#include "Utils.hpp"

#include <log/log.h>
#include <type_traits>

#ifdef ARMNN_ANDROID_S
#include <LegacyUtils.h>
#endif

namespace armnn_driver
{

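// Converts the given HAL model into an ArmNN INetwork as soon as the object is constructed; any
// exception thrown during conversion is caught and recorded as ConversionResult::UnsupportedFeature.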
template<typename HalPolicy>
ModelToINetworkConverter<HalPolicy>::ModelToINetworkConverter(const std::vector<armnn::BackendId>& backends,
                                                              const HalModel& model,
                                                              const std::set<unsigned int>& forcedUnsupportedOperations)
    : m_Data(backends)
    , m_Model(model)
    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
    , m_ConversionResult(ConversionResult::Success)
{
    try
    {
        Convert();
    }
    catch (std::exception& e)
    {
        m_ConversionResult = ConversionResult::UnsupportedFeature;
        ALOGE("%s: Unexpected exception: %s", __func__, e.what());
    }
}

template<typename HalPolicy>
void ModelToINetworkConverter<HalPolicy>::Convert()
{
    using HalModel = typename HalPolicy::Model;
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());

    // Map the memory pools into shared pointers
    m_Data.m_MemPools.clear();
#if !defined(ARMNN_ANDROID_S)
    if (!setRunTimePoolInfosFromHidlMemories(&m_Data.m_MemPools, m_Model.pools))
#else
    if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, uncheckedConvert(m_Model.pools)))
#endif
    {
        Fail("%s: Failed to set runtime pool infos from the model's memory pools.", __func__);
        m_ConversionResult = ConversionResult::ErrorMappingPools;
        return;
    }

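    // Tally the combined size of the model's memory pools; the total is not used further in this function.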
    uint32_t totalPoolSize = 0;
    for (auto&& pool : m_Model.pools)
    {
        totalPoolSize += pool.size();
    }

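    // Build the network with shape inference enabled, so ArmNN infers and validates output tensor
    // shapes while the graph is being constructed.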
    using NetworkOptions = std::vector<armnn::BackendOptions>;
    NetworkOptions networkOptions;
    armnn::BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
                                                     {
                                                         { "InferAndValidate", true }
                                                     });

    networkOptions.push_back(shapeInferenceMethodOption);

    // Create armnn::INetwork
    m_Data.m_Network = armnn::INetwork::Create(networkOptions);

    // Add operations to it
    // Track which layer outputs each operand
    ALOGV("ModelToINetworkConverter::Convert(): m_OutputSlotForOperand");
    m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(getMainModel(m_Model).operands.size(), nullptr);
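    // Add an ArmNN input layer for each of the model's input operands and remember the layer's
    // output slot so later operations can connect to it.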
    try
    {
        ALOGV("ModelToINetworkConverter::Convert(): for getMainModel(m_Model).inputIndexes.size()");
        for (uint32_t i = 0; i < getMainModel(m_Model).inputIndexes.size(); i++)
        {
            ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).inputIndexes[i]");
            // Inputs in android nn are represented by operands
            uint32_t inputIndex = getMainModel(m_Model).inputIndexes[i];
            ALOGV("ModelToINetworkConverter::Convert(): getMainModel(m_Model).operands[inputIndex];");
            const HalOperand& operand = getMainModel(m_Model).operands[inputIndex];
            ALOGV("ModelToINetworkConverter::Convert(): GetTensorInfoForOperand(operand)");
            const std::string layerName = "Input_" + std::to_string(i);
            ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_Network->AddInputLayer(i, layerName.c_str())");
            armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i, layerName.c_str());

            ALOGV("ModelToINetworkConverter::Convert(): layer->GetOutputSlot(0)");
            armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
            ALOGV("ModelToINetworkConverter::Convert(): outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand))");
            outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));

            ALOGV("ModelToINetworkConverter::Convert(): m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot");
            // Store for later layers
            m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
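
    // Convert each operation of the main model. The result for every operation is recorded in
    // m_OperationSupported so IsOperationSupported() can report it afterwards.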
    bool UnsupportedDynamicOperation = false;
    for (uint32_t operationIdx = 0; operationIdx < getMainModel(m_Model).operations.size(); operationIdx++)
    {
        const auto& operation = getMainModel(m_Model).operations[operationIdx];

        bool ok = true;
        if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
        {
            Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
            ok = false;
        }

        if (ok)
        {
            try
            {
                ok = HalPolicy::ConvertOperation(operation, m_Model, m_Data);
            }
            catch (UnsupportedOperand<HalOperandType>& e)
            {
                Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
                ok = false;
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                Fail("%s: Failed to convert operation: %s", __func__, e.what());
                ok = false;
            }
        }

        // Store whether this operation was successfully converted.
        m_OperationSupported.emplace(operationIdx, ok);

        // Any single operation failing will fail the entire conversion.
        // We still need to continue and check the other ones.
        if (!ok)
        {
            if (m_Data.m_DynamicInputsEncountered)
            {
                Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
                UnsupportedDynamicOperation = true;
            }

            m_ConversionResult = ConversionResult::UnsupportedFeature;
        }
        m_Data.m_DynamicInputsEncountered = false;
    }

    // Due to the NNAPI partitioner not supporting partition boundaries of unknown size,
    // any operation whose outputs connect to an unsupported operation with dynamic inputs
    // will cause a failure.

    // The simplest solution to this problem is to not support any operations in a model containing
    // an unsupported operation with dynamic inputs.
    if (UnsupportedDynamicOperation)
    {
        Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
             __func__);
        for (auto& operation : m_OperationSupported)
        {
            operation.second = false;
        }
    }

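    // Connect each of the model's output operands to a dedicated ArmNN output layer, but only if
    // every preceding conversion step succeeded.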
    try
    {
        if (m_ConversionResult == ConversionResult::Success)
        {
            for (uint32_t i = 0; i < getMainModel(m_Model).outputIndexes.size(); i++)
            {
                // Outputs in android nn are represented by operands
                uint32_t outputIndex = getMainModel(m_Model).outputIndexes[i];
                const std::string layerName = "Output_" + std::to_string(i);
                armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i, layerName.c_str());

                if (!m_Data.m_OutputSlotForOperand[outputIndex])
                {
                    Fail("%s: OutputSlot %i does not exist", __func__, outputIndex);
                    m_ConversionResult = ConversionResult::UnsupportedFeature;
                    break;
                }
                m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
            }
        }
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
}

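// Reports whether the operation at the given index was converted successfully; unknown indices are
// reported as unsupported via Fail().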
template<typename HalPolicy>
bool ModelToINetworkConverter<HalPolicy>::IsOperationSupported(uint32_t operationIndex) const
{
    std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
    if (it == m_OperationSupported.end())
    {
        return Fail("%s: Unrecognised Operation Index: %i", __func__, operationIndex);
    }
    return it->second;
}
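
// A minimal usage sketch (assuming the caller already has a populated HAL model, and that the class
// header exposes the outcome via a GetConversionResult() accessor, as hypothesised here):
//
//     ModelToINetworkConverter<hal_1_0::HalPolicy> converter(backends, model, /*forcedUnsupported=*/{});
//     if (converter.GetConversionResult() == ConversionResult::Success)
//     {
//         // The converted INetwork can now be optimized and loaded into the ArmNN runtime.
//     }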

///
/// Class template specializations
///

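// One explicit instantiation is emitted per NNAPI HAL version enabled for this build.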
template class ModelToINetworkConverter<hal_1_0::HalPolicy>;

#ifdef ARMNN_ANDROID_NN_V1_1
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
#endif

#ifdef ARMNN_ANDROID_NN_V1_2
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
template class ModelToINetworkConverter<hal_1_2::HalPolicy>;
#endif

#ifdef ARMNN_ANDROID_NN_V1_3
template class ModelToINetworkConverter<hal_1_1::HalPolicy>;
template class ModelToINetworkConverter<hal_1_2::HalPolicy>;
template class ModelToINetworkConverter<hal_1_3::HalPolicy>;
#endif

} // armnn_driver