xref: /aosp_15_r20/external/android-nn-driver/Utils.cpp (revision 3e777be0405cee09af5d5785ff37f7cfb5bee59a)
1*3e777be0SXin Li //
2*3e777be0SXin Li // Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
3*3e777be0SXin Li // SPDX-License-Identifier: MIT
4*3e777be0SXin Li //
5*3e777be0SXin Li 
6*3e777be0SXin Li #define LOG_TAG "ArmnnDriver"
7*3e777be0SXin Li 
8*3e777be0SXin Li #include "Utils.hpp"
9*3e777be0SXin Li #include "Half.hpp"
10*3e777be0SXin Li 
11*3e777be0SXin Li #include <armnnSerializer/ISerializer.hpp>
12*3e777be0SXin Li #include <armnnUtils/Filesystem.hpp>
13*3e777be0SXin Li #include <armnnUtils/Permute.hpp>
14*3e777be0SXin Li 
15*3e777be0SXin Li #include <armnn/Utils.hpp>
16*3e777be0SXin Li #include <log/log.h>
17*3e777be0SXin Li 
18*3e777be0SXin Li #include <cerrno>
19*3e777be0SXin Li #include <cinttypes>
20*3e777be0SXin Li #include <sstream>
21*3e777be0SXin Li #include <cstdio>
22*3e777be0SXin Li #include <time.h>
23*3e777be0SXin Li #include <string>
24*3e777be0SXin Li #include <span>
25*3e777be0SXin Li 
26*3e777be0SXin Li using namespace android;
27*3e777be0SXin Li using namespace android::hardware;
28*3e777be0SXin Li using namespace android::hidl::memory::V1_0;
29*3e777be0SXin Li 
30*3e777be0SXin Li namespace armnn_driver
31*3e777be0SXin Li {
// Identity permutation: passed wherever a PermutationVector is required but
// no re-ordering of dimensions is wanted.
const armnn::PermutationVector g_DontPermute{};
33*3e777be0SXin Li 
SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo & tensorInfo,const void * input,void * output,const armnn::PermutationVector & mappings)34*3e777be0SXin Li void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo, const void* input, void* output,
35*3e777be0SXin Li                                      const armnn::PermutationVector& mappings)
36*3e777be0SXin Li {
37*3e777be0SXin Li     if (tensorInfo.GetNumDimensions() != 4U)
38*3e777be0SXin Li     {
39*3e777be0SXin Li         throw armnn::InvalidArgumentException("NumDimensions must be 4");
40*3e777be0SXin Li     }
41*3e777be0SXin Li     armnn::DataType dataType = tensorInfo.GetDataType();
42*3e777be0SXin Li     switch (dataType)
43*3e777be0SXin Li     {
44*3e777be0SXin Li     case armnn::DataType::Float16:
45*3e777be0SXin Li     case armnn::DataType::Float32:
46*3e777be0SXin Li     case armnn::DataType::QAsymmU8:
47*3e777be0SXin Li     case armnn::DataType::QSymmS16:
48*3e777be0SXin Li     case armnn::DataType::QSymmS8:
49*3e777be0SXin Li     case armnn::DataType::QAsymmS8:
50*3e777be0SXin Li         // First swizzle tensor info
51*3e777be0SXin Li         tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
52*3e777be0SXin Li         // Then swizzle tensor data
53*3e777be0SXin Li         armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
54*3e777be0SXin Li         break;
55*3e777be0SXin Li     default:
56*3e777be0SXin Li         throw armnn::InvalidArgumentException("Unknown DataType for swizzling");
57*3e777be0SXin Li     }
58*3e777be0SXin Li }
59*3e777be0SXin Li 
// Builds a per-dimension "is specified" flag list for the given dimensions
// container (anything exposing size() and data(), e.g. hidl_vec). In the
// NNAPI model a dimension of 0 means "unspecified".
template<typename Dimensions>
auto GetDimensionsSpecificity(const Dimensions& dimensions)
{
    // std::vector<bool> is a bit-packed specialization whose data() cannot
    // yield a bool*, so a basic_string of bools is used instead. It also
    // benefits from the small string optimization.
    std::basic_string<bool> specificity(dimensions.size(), false);

    const auto* dims = dimensions.data();
    for (std::size_t idx = 0; idx < dimensions.size(); ++idx)
    {
        specificity[idx] = (dims[idx] != 0);
    }

    return specificity;
}
75*3e777be0SXin Li 
GetMemoryFromPool(V1_0::DataLocation location,const std::vector<android::nn::RunTimePoolInfo> & memPools)76*3e777be0SXin Li void* GetMemoryFromPool(V1_0::DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
77*3e777be0SXin Li {
78*3e777be0SXin Li     // find the location within the pool
79*3e777be0SXin Li     if (location.poolIndex >= memPools.size())
80*3e777be0SXin Li     {
81*3e777be0SXin Li         throw armnn::InvalidArgumentException("The poolIndex is greater than the memPools size.");
82*3e777be0SXin Li     }
83*3e777be0SXin Li 
84*3e777be0SXin Li     const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
85*3e777be0SXin Li 
86*3e777be0SXin Li     uint8_t* memPoolBuffer = memPool.getBuffer();
87*3e777be0SXin Li 
88*3e777be0SXin Li     uint8_t* memory = memPoolBuffer + location.offset;
89*3e777be0SXin Li 
90*3e777be0SXin Li     return memory;
91*3e777be0SXin Li }
92*3e777be0SXin Li 
GetTensorInfoForOperand(const V1_0::Operand & operand)93*3e777be0SXin Li armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
94*3e777be0SXin Li {
95*3e777be0SXin Li     using namespace armnn;
96*3e777be0SXin Li     DataType type;
97*3e777be0SXin Li 
98*3e777be0SXin Li     switch (operand.type)
99*3e777be0SXin Li     {
100*3e777be0SXin Li         case V1_0::OperandType::TENSOR_FLOAT32:
101*3e777be0SXin Li             type = armnn::DataType::Float32;
102*3e777be0SXin Li             break;
103*3e777be0SXin Li         case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
104*3e777be0SXin Li             type = armnn::DataType::QAsymmU8;
105*3e777be0SXin Li             break;
106*3e777be0SXin Li         case V1_0::OperandType::TENSOR_INT32:
107*3e777be0SXin Li             type = armnn::DataType::Signed32;
108*3e777be0SXin Li             break;
109*3e777be0SXin Li         default:
110*3e777be0SXin Li             throw UnsupportedOperand<V1_0::OperandType>(operand.type);
111*3e777be0SXin Li     }
112*3e777be0SXin Li 
113*3e777be0SXin Li     TensorInfo ret;
114*3e777be0SXin Li     if (operand.dimensions.size() == 0)
115*3e777be0SXin Li     {
116*3e777be0SXin Li         TensorShape tensorShape(Dimensionality::NotSpecified);
117*3e777be0SXin Li         ret = TensorInfo(tensorShape, type);
118*3e777be0SXin Li     }
119*3e777be0SXin Li     else
120*3e777be0SXin Li     {
121*3e777be0SXin Li         auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
122*3e777be0SXin Li         TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
123*3e777be0SXin Li         ret = TensorInfo(tensorShape, type);
124*3e777be0SXin Li     }
125*3e777be0SXin Li 
126*3e777be0SXin Li     ret.SetQuantizationScale(operand.scale);
127*3e777be0SXin Li     ret.SetQuantizationOffset(operand.zeroPoint);
128*3e777be0SXin Li 
129*3e777be0SXin Li     return ret;
130*3e777be0SXin Li }
131*3e777be0SXin Li 
132*3e777be0SXin Li #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
133*3e777be0SXin Li 
GetTensorInfoForOperand(const V1_2::Operand & operand)134*3e777be0SXin Li armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
135*3e777be0SXin Li {
136*3e777be0SXin Li     using namespace armnn;
137*3e777be0SXin Li     bool perChannel = false;
138*3e777be0SXin Li 
139*3e777be0SXin Li     DataType type;
140*3e777be0SXin Li     switch (operand.type)
141*3e777be0SXin Li     {
142*3e777be0SXin Li         case V1_2::OperandType::TENSOR_BOOL8:
143*3e777be0SXin Li             type = armnn::DataType::Boolean;
144*3e777be0SXin Li             break;
145*3e777be0SXin Li         case V1_2::OperandType::TENSOR_FLOAT32:
146*3e777be0SXin Li             type = armnn::DataType::Float32;
147*3e777be0SXin Li             break;
148*3e777be0SXin Li         case V1_2::OperandType::TENSOR_FLOAT16:
149*3e777be0SXin Li             type = armnn::DataType::Float16;
150*3e777be0SXin Li             break;
151*3e777be0SXin Li         case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
152*3e777be0SXin Li             type = armnn::DataType::QAsymmU8;
153*3e777be0SXin Li             break;
154*3e777be0SXin Li         case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
155*3e777be0SXin Li             perChannel=true;
156*3e777be0SXin Li             ARMNN_FALLTHROUGH;
157*3e777be0SXin Li         case V1_2::OperandType::TENSOR_QUANT8_SYMM:
158*3e777be0SXin Li             type = armnn::DataType::QSymmS8;
159*3e777be0SXin Li             break;
160*3e777be0SXin Li         case V1_2::OperandType::TENSOR_QUANT16_SYMM:
161*3e777be0SXin Li             type = armnn::DataType::QSymmS16;
162*3e777be0SXin Li             break;
163*3e777be0SXin Li         case V1_2::OperandType::TENSOR_INT32:
164*3e777be0SXin Li             type = armnn::DataType::Signed32;
165*3e777be0SXin Li             break;
166*3e777be0SXin Li         default:
167*3e777be0SXin Li             throw UnsupportedOperand<V1_2::OperandType>(operand.type);
168*3e777be0SXin Li     }
169*3e777be0SXin Li 
170*3e777be0SXin Li     TensorInfo ret;
171*3e777be0SXin Li     if (operand.dimensions.size() == 0)
172*3e777be0SXin Li     {
173*3e777be0SXin Li         TensorShape tensorShape(Dimensionality::NotSpecified);
174*3e777be0SXin Li         ret = TensorInfo(tensorShape, type);
175*3e777be0SXin Li     }
176*3e777be0SXin Li     else
177*3e777be0SXin Li     {
178*3e777be0SXin Li         auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
179*3e777be0SXin Li         TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
180*3e777be0SXin Li         ret = TensorInfo(tensorShape, type);
181*3e777be0SXin Li     }
182*3e777be0SXin Li 
183*3e777be0SXin Li     if (perChannel)
184*3e777be0SXin Li     {
185*3e777be0SXin Li         if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
186*3e777be0SXin Li         {
187*3e777be0SXin Li             throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
188*3e777be0SXin Li         }
189*3e777be0SXin Li 
190*3e777be0SXin Li         auto perAxisQuantParams = operand.extraParams.channelQuant();
191*3e777be0SXin Li 
192*3e777be0SXin Li         ret.SetQuantizationScales(perAxisQuantParams.scales);
193*3e777be0SXin Li         ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
194*3e777be0SXin Li     }
195*3e777be0SXin Li     else
196*3e777be0SXin Li     {
197*3e777be0SXin Li         ret.SetQuantizationScale(operand.scale);
198*3e777be0SXin Li         ret.SetQuantizationOffset(operand.zeroPoint);
199*3e777be0SXin Li     }
200*3e777be0SXin Li 
201*3e777be0SXin Li     return ret;
202*3e777be0SXin Li }
203*3e777be0SXin Li 
204*3e777be0SXin Li #endif
205*3e777be0SXin Li 
206*3e777be0SXin Li #ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
207*3e777be0SXin Li 
// Converts an NNAPI V1_3 Operand into an armnn::TensorInfo, mapping the
// operand type, shape and (possibly per-channel) quantization parameters.
// Scalar INT32 operands are mapped to a Scalar-dimensionality tensor.
// Throws UnsupportedOperand for operand types with no Arm NN equivalent and
// InvalidArgumentException if per-channel quantization info is missing.
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar   = false;

    DataType type;
    switch (operand.type)
    {
        case V1_3::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_3::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_3::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            // Per-channel operands share the QSymmS8 element type below.
            perChannel=true;
            ARMNN_FALLTHROUGH;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_3::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case V1_3::OperandType::INT32:
            // Plain (non-tensor) INT32 is represented as a scalar tensor.
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            // Rank unknown: flag the whole shape as unspecified.
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            // A dimension of 0 means "unspecified"; record which are known.
            auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
        {
            throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
        }
        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}
291*3e777be0SXin Li 
292*3e777be0SXin Li #endif
293*3e777be0SXin Li 
GetOperandSummary(const V1_0::Operand & operand)294*3e777be0SXin Li std::string GetOperandSummary(const V1_0::Operand& operand)
295*3e777be0SXin Li {
296*3e777be0SXin Li     return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
297*3e777be0SXin Li         toString(operand.type);
298*3e777be0SXin Li }
299*3e777be0SXin Li 
300*3e777be0SXin Li #if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
301*3e777be0SXin Li 
GetOperandSummary(const V1_2::Operand & operand)302*3e777be0SXin Li std::string GetOperandSummary(const V1_2::Operand& operand)
303*3e777be0SXin Li {
304*3e777be0SXin Li     return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
305*3e777be0SXin Li            toString(operand.type);
306*3e777be0SXin Li }
307*3e777be0SXin Li 
308*3e777be0SXin Li #endif
309*3e777be0SXin Li 
310*3e777be0SXin Li #ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
311*3e777be0SXin Li 
GetOperandSummary(const V1_3::Operand & operand)312*3e777be0SXin Li std::string GetOperandSummary(const V1_3::Operand& operand)
313*3e777be0SXin Li {
314*3e777be0SXin Li     return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
315*3e777be0SXin Li            toString(operand.type);
316*3e777be0SXin Li }
317*3e777be0SXin Li 
318*3e777be0SXin Li #endif
319*3e777be0SXin Li 
// Signature of the per-element printers used by DumpTensor: writes the
// element at elementIndex of the given tensor to the output stream.
template <typename TensorType>
using DumpElementFunction = void (*)(const TensorType& tensor,
    unsigned int elementIndex,
    std::ofstream& fileStream);
324*3e777be0SXin Li 
namespace
{

// Writes element 'elementIndex' of the tensor's backing memory to
// 'fileStream', reinterpreting the memory as ElementType and printing it as
// PrintableType (so e.g. uint8_t values appear as numbers, not characters).
// A single trailing space separates consecutive elements.
template <typename TensorType, typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const auto* typedMemory = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(typedMemory[elementIndex]) << " ";
}

} // namespace
335*3e777be0SXin Li 
336*3e777be0SXin Li template <typename TensorType>
DumpTensor(const std::string & dumpDir,const std::string & requestName,const std::string & tensorName,const TensorType & tensor)337*3e777be0SXin Li void DumpTensor(const std::string& dumpDir,
338*3e777be0SXin Li     const std::string& requestName,
339*3e777be0SXin Li     const std::string& tensorName,
340*3e777be0SXin Li     const TensorType& tensor)
341*3e777be0SXin Li {
342*3e777be0SXin Li     // The dump directory must exist in advance.
343*3e777be0SXin Li     fs::path dumpPath = dumpDir;
344*3e777be0SXin Li     const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");
345*3e777be0SXin Li 
346*3e777be0SXin Li     std::ofstream fileStream;
347*3e777be0SXin Li     fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
348*3e777be0SXin Li 
349*3e777be0SXin Li     if (!fileStream.good())
350*3e777be0SXin Li     {
351*3e777be0SXin Li         ALOGW("Could not open file %s for writing", fileName.c_str());
352*3e777be0SXin Li         return;
353*3e777be0SXin Li     }
354*3e777be0SXin Li 
355*3e777be0SXin Li     DumpElementFunction<TensorType> dumpElementFunction = nullptr;
356*3e777be0SXin Li 
357*3e777be0SXin Li     switch (tensor.GetDataType())
358*3e777be0SXin Li     {
359*3e777be0SXin Li         case armnn::DataType::Float32:
360*3e777be0SXin Li         {
361*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, float>;
362*3e777be0SXin Li             break;
363*3e777be0SXin Li         }
364*3e777be0SXin Li         case armnn::DataType::QAsymmU8:
365*3e777be0SXin Li         {
366*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
367*3e777be0SXin Li             break;
368*3e777be0SXin Li         }
369*3e777be0SXin Li         case armnn::DataType::Signed32:
370*3e777be0SXin Li         {
371*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
372*3e777be0SXin Li             break;
373*3e777be0SXin Li         }
374*3e777be0SXin Li         case armnn::DataType::Float16:
375*3e777be0SXin Li         {
376*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
377*3e777be0SXin Li             break;
378*3e777be0SXin Li         }
379*3e777be0SXin Li         case armnn::DataType::QAsymmS8:
380*3e777be0SXin Li         {
381*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
382*3e777be0SXin Li             break;
383*3e777be0SXin Li         }
384*3e777be0SXin Li         case armnn::DataType::Boolean:
385*3e777be0SXin Li         {
386*3e777be0SXin Li             dumpElementFunction = &DumpTensorElement<TensorType, bool>;
387*3e777be0SXin Li             break;
388*3e777be0SXin Li         }
389*3e777be0SXin Li         default:
390*3e777be0SXin Li         {
391*3e777be0SXin Li             dumpElementFunction = nullptr;
392*3e777be0SXin Li         }
393*3e777be0SXin Li     }
394*3e777be0SXin Li 
395*3e777be0SXin Li     if (dumpElementFunction != nullptr)
396*3e777be0SXin Li     {
397*3e777be0SXin Li         const unsigned int numDimensions = tensor.GetNumDimensions();
398*3e777be0SXin Li         const armnn::TensorShape shape = tensor.GetShape();
399*3e777be0SXin Li 
400*3e777be0SXin Li         if (!shape.AreAllDimensionsSpecified())
401*3e777be0SXin Li         {
402*3e777be0SXin Li             fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
403*3e777be0SXin Li             return;
404*3e777be0SXin Li         }
405*3e777be0SXin Li         fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
406*3e777be0SXin Li 
407*3e777be0SXin Li         if (numDimensions == 0)
408*3e777be0SXin Li         {
409*3e777be0SXin Li             fileStream << "# Shape []" << std::endl;
410*3e777be0SXin Li             return;
411*3e777be0SXin Li         }
412*3e777be0SXin Li         fileStream << "# Shape [" << shape[0];
413*3e777be0SXin Li         for (unsigned int d = 1; d < numDimensions; ++d)
414*3e777be0SXin Li         {
415*3e777be0SXin Li             fileStream << "," << shape[d];
416*3e777be0SXin Li         }
417*3e777be0SXin Li         fileStream << "]" << std::endl;
418*3e777be0SXin Li         fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
419*3e777be0SXin Li                       " will be a batch" << std::endl << std::endl;
420*3e777be0SXin Li 
421*3e777be0SXin Li         // Split will create a new line after all elements of the first dimension
422*3e777be0SXin Li         // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
423*3e777be0SXin Li         unsigned int split = 1;
424*3e777be0SXin Li         if (numDimensions == 1)
425*3e777be0SXin Li         {
426*3e777be0SXin Li             split = shape[0];
427*3e777be0SXin Li         }
428*3e777be0SXin Li         else
429*3e777be0SXin Li         {
430*3e777be0SXin Li             for (unsigned int i = 1; i < numDimensions; ++i)
431*3e777be0SXin Li             {
432*3e777be0SXin Li                 split *= shape[i];
433*3e777be0SXin Li             }
434*3e777be0SXin Li         }
435*3e777be0SXin Li 
436*3e777be0SXin Li         // Print all elements in the tensor
437*3e777be0SXin Li         for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
438*3e777be0SXin Li         {
439*3e777be0SXin Li             (*dumpElementFunction)(tensor, elementIndex, fileStream);
440*3e777be0SXin Li 
441*3e777be0SXin Li             if ( (elementIndex + 1) % split == 0 )
442*3e777be0SXin Li             {
443*3e777be0SXin Li                 fileStream << std::endl;
444*3e777be0SXin Li             }
445*3e777be0SXin Li         }
446*3e777be0SXin Li         fileStream << std::endl;
447*3e777be0SXin Li     }
448*3e777be0SXin Li     else
449*3e777be0SXin Li     {
450*3e777be0SXin Li         fileStream << "Cannot dump tensor elements: Unsupported data type "
451*3e777be0SXin Li             << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
452*3e777be0SXin Li     }
453*3e777be0SXin Li 
454*3e777be0SXin Li     if (!fileStream.good())
455*3e777be0SXin Li     {
456*3e777be0SXin Li         ALOGW("An error occurred when writing to file %s", fileName.c_str());
457*3e777be0SXin Li     }
458*3e777be0SXin Li }
459*3e777be0SXin Li 
460*3e777be0SXin Li 
// Explicit instantiations: DumpTensor is only used with these two tensor
// types, so they are instantiated here rather than exposing the template body.
template void DumpTensor<armnn::ConstTensor>(const std::string& dumpDir,
                                             const std::string& requestName,
                                             const std::string& tensorName,
                                             const armnn::ConstTensor& tensor);

template void DumpTensor<armnn::Tensor>(const std::string& dumpDir,
                                        const std::string& requestName,
                                        const std::string& tensorName,
                                        const armnn::Tensor& tensor);
470*3e777be0SXin Li 
DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,const std::string & dumpDir,armnn::NetworkId networkId,const armnn::IProfiler * profiler)471*3e777be0SXin Li void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
472*3e777be0SXin Li                                  const std::string& dumpDir,
473*3e777be0SXin Li                                  armnn::NetworkId networkId,
474*3e777be0SXin Li                                  const armnn::IProfiler* profiler)
475*3e777be0SXin Li {
476*3e777be0SXin Li     // Check if profiling is required.
477*3e777be0SXin Li     if (!gpuProfilingEnabled)
478*3e777be0SXin Li     {
479*3e777be0SXin Li         return;
480*3e777be0SXin Li     }
481*3e777be0SXin Li 
482*3e777be0SXin Li     // The dump directory must exist in advance.
483*3e777be0SXin Li     if (dumpDir.empty())
484*3e777be0SXin Li     {
485*3e777be0SXin Li         return;
486*3e777be0SXin Li     }
487*3e777be0SXin Li 
488*3e777be0SXin Li     if (!profiler)
489*3e777be0SXin Li     {
490*3e777be0SXin Li         ALOGW("profiler was null");
491*3e777be0SXin Li         return;
492*3e777be0SXin Li     }
493*3e777be0SXin Li 
494*3e777be0SXin Li     // Set the name of the output profiling file.
495*3e777be0SXin Li     fs::path dumpPath = dumpDir;
496*3e777be0SXin Li     const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");
497*3e777be0SXin Li 
498*3e777be0SXin Li     // Open the ouput file for writing.
499*3e777be0SXin Li     std::ofstream fileStream;
500*3e777be0SXin Li     fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
501*3e777be0SXin Li 
502*3e777be0SXin Li     if (!fileStream.good())
503*3e777be0SXin Li     {
504*3e777be0SXin Li         ALOGW("Could not open file %s for writing", fileName.c_str());
505*3e777be0SXin Li         return;
506*3e777be0SXin Li     }
507*3e777be0SXin Li 
508*3e777be0SXin Li     // Write the profiling info to a JSON file.
509*3e777be0SXin Li     profiler->Print(fileStream);
510*3e777be0SXin Li }
511*3e777be0SXin Li 
ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork & optimizedNetwork,const std::string & dumpDir)512*3e777be0SXin Li std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
513*3e777be0SXin Li                                         const std::string& dumpDir)
514*3e777be0SXin Li {
515*3e777be0SXin Li     std::string fileName;
516*3e777be0SXin Li     // The dump directory must exist in advance.
517*3e777be0SXin Li     if (dumpDir.empty())
518*3e777be0SXin Li     {
519*3e777be0SXin Li         return fileName;
520*3e777be0SXin Li     }
521*3e777be0SXin Li 
522*3e777be0SXin Li     std::string timestamp = GetFileTimestamp();
523*3e777be0SXin Li     if (timestamp.empty())
524*3e777be0SXin Li     {
525*3e777be0SXin Li         return fileName;
526*3e777be0SXin Li     }
527*3e777be0SXin Li 
528*3e777be0SXin Li     // Set the name of the output .dot file.
529*3e777be0SXin Li     fs::path dumpPath = dumpDir;
530*3e777be0SXin Li     fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
531*3e777be0SXin Li     fileName = tempFilePath.string();
532*3e777be0SXin Li 
533*3e777be0SXin Li     ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());
534*3e777be0SXin Li 
535*3e777be0SXin Li     // Write the network graph to a dot file.
536*3e777be0SXin Li     std::ofstream fileStream;
537*3e777be0SXin Li     fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
538*3e777be0SXin Li 
539*3e777be0SXin Li     if (!fileStream.good())
540*3e777be0SXin Li     {
541*3e777be0SXin Li         ALOGW("Could not open file %s for writing", fileName.c_str());
542*3e777be0SXin Li         return fileName;
543*3e777be0SXin Li     }
544*3e777be0SXin Li 
545*3e777be0SXin Li     if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
546*3e777be0SXin Li     {
547*3e777be0SXin Li         ALOGW("An error occurred when writing to file %s", fileName.c_str());
548*3e777be0SXin Li     }
549*3e777be0SXin Li     return fileName;
550*3e777be0SXin Li }
551*3e777be0SXin Li 
SerializeNetwork(const armnn::INetwork & network,const std::string & dumpDir,std::vector<uint8_t> & dataCacheData,bool dataCachingActive)552*3e777be0SXin Li std::string SerializeNetwork(const armnn::INetwork& network,
553*3e777be0SXin Li                              const std::string& dumpDir,
554*3e777be0SXin Li                              std::vector<uint8_t>& dataCacheData,
555*3e777be0SXin Li                              bool dataCachingActive)
556*3e777be0SXin Li {
557*3e777be0SXin Li     std::string fileName;
558*3e777be0SXin Li     bool bSerializeToFile = true;
559*3e777be0SXin Li     if (dumpDir.empty())
560*3e777be0SXin Li     {
561*3e777be0SXin Li         bSerializeToFile = false;
562*3e777be0SXin Li     }
563*3e777be0SXin Li     else
564*3e777be0SXin Li     {
565*3e777be0SXin Li         std::string timestamp = GetFileTimestamp();
566*3e777be0SXin Li         if (timestamp.empty())
567*3e777be0SXin Li         {
568*3e777be0SXin Li             bSerializeToFile = false;
569*3e777be0SXin Li         }
570*3e777be0SXin Li     }
571*3e777be0SXin Li     if (!bSerializeToFile && !dataCachingActive)
572*3e777be0SXin Li     {
573*3e777be0SXin Li         return fileName;
574*3e777be0SXin Li     }
575*3e777be0SXin Li 
576*3e777be0SXin Li     auto serializer(armnnSerializer::ISerializer::Create());
577*3e777be0SXin Li     // Serialize the Network
578*3e777be0SXin Li     serializer->Serialize(network);
579*3e777be0SXin Li     if (dataCachingActive)
580*3e777be0SXin Li     {
581*3e777be0SXin Li         std::stringstream stream;
582*3e777be0SXin Li         auto serialized = serializer->SaveSerializedToStream(stream);
583*3e777be0SXin Li         if (serialized)
584*3e777be0SXin Li         {
585*3e777be0SXin Li             std::string const serializedString{stream.str()};
586*3e777be0SXin Li             std::copy(serializedString.begin(), serializedString.end(), std::back_inserter(dataCacheData));
587*3e777be0SXin Li         }
588*3e777be0SXin Li     }
589*3e777be0SXin Li 
590*3e777be0SXin Li     if (bSerializeToFile)
591*3e777be0SXin Li     {
592*3e777be0SXin Li         // Set the name of the output .armnn file.
593*3e777be0SXin Li         fs::path dumpPath = dumpDir;
594*3e777be0SXin Li         std::string timestamp = GetFileTimestamp();
595*3e777be0SXin Li         fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
596*3e777be0SXin Li         fileName = tempFilePath.string();
597*3e777be0SXin Li 
598*3e777be0SXin Li         // Save serialized network to a file
599*3e777be0SXin Li         std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
600*3e777be0SXin Li         auto serialized = serializer->SaveSerializedToStream(serializedFile);
601*3e777be0SXin Li         if (!serialized)
602*3e777be0SXin Li         {
603*3e777be0SXin Li             ALOGW("An error occurred when serializing to file %s", fileName.c_str());
604*3e777be0SXin Li         }
605*3e777be0SXin Li     }
606*3e777be0SXin Li     return fileName;
607*3e777be0SXin Li }
608*3e777be0SXin Li 
IsDynamicTensor(const armnn::TensorInfo & tensorInfo)609*3e777be0SXin Li bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
610*3e777be0SXin Li {
611*3e777be0SXin Li     if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
612*3e777be0SXin Li     {
613*3e777be0SXin Li         return true;
614*3e777be0SXin Li     }
615*3e777be0SXin Li     // Account for the usage of the TensorShape empty constructor
616*3e777be0SXin Li     if (tensorInfo.GetNumDimensions() == 0)
617*3e777be0SXin Li     {
618*3e777be0SXin Li         return true;
619*3e777be0SXin Li     }
620*3e777be0SXin Li     return !tensorInfo.GetShape().AreAllDimensionsSpecified();
621*3e777be0SXin Li }
622*3e777be0SXin Li 
bool AreDynamicTensorsSupported()
{
    // Dynamic tensors are only handled when the driver is built against
    // the NNAPI 1.3 HAL; every earlier HAL build reports no support.
#if defined(ARMNN_ANDROID_NN_V1_3)
    constexpr bool dynamicTensorsSupported = true;
#else
    constexpr bool dynamicTensorsSupported = false;
#endif
    return dynamicTensorsSupported;
}
631*3e777be0SXin Li 
isQuantizedOperand(const V1_0::OperandType & operandType)632*3e777be0SXin Li bool isQuantizedOperand(const V1_0::OperandType& operandType)
633*3e777be0SXin Li {
634*3e777be0SXin Li     if (operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM)
635*3e777be0SXin Li     {
636*3e777be0SXin Li         return true;
637*3e777be0SXin Li     }
638*3e777be0SXin Li     else
639*3e777be0SXin Li     {
640*3e777be0SXin Li         return false;
641*3e777be0SXin Li     }
642*3e777be0SXin Li }
643*3e777be0SXin Li 
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
/// Returns true if the given V1_2 operand type is one of the quantized
/// tensor types introduced up to and including NNAPI HAL 1.2.
bool isQuantizedOperand(const V1_2::OperandType& operandType)
{
    // Return the combined comparison directly instead of an if/else over bool literals.
    return operandType == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           operandType == V1_2::OperandType::TENSOR_QUANT16_SYMM;
}
#endif
660*3e777be0SXin Li 
#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
/// Returns true if the given V1_3 operand type is one of the quantized
/// tensor types, including TENSOR_QUANT8_ASYMM_SIGNED added in HAL 1.3.
bool isQuantizedOperand(const V1_3::OperandType& operandType)
{
    // Return the combined comparison directly instead of an if/else over bool literals.
    return operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
}
#endif
678*3e777be0SXin Li 
GetFileTimestamp()679*3e777be0SXin Li std::string GetFileTimestamp()
680*3e777be0SXin Li {
681*3e777be0SXin Li     // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
682*3e777be0SXin Li     // and getSupportedOperations.txt files)
683*3e777be0SXin Li     timespec ts;
684*3e777be0SXin Li     int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
685*3e777be0SXin Li     std::stringstream ss;
686*3e777be0SXin Li     if (iRet == 0)
687*3e777be0SXin Li     {
688*3e777be0SXin Li         ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
689*3e777be0SXin Li     }
690*3e777be0SXin Li     else
691*3e777be0SXin Li     {
692*3e777be0SXin Li         ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno));
693*3e777be0SXin Li     }
694*3e777be0SXin Li     return ss.str();
695*3e777be0SXin Li }
696*3e777be0SXin Li 
RenameExportedFiles(const std::string & existingSerializedFileName,const std::string & existingDotFileName,const std::string & dumpDir,const armnn::NetworkId networkId)697*3e777be0SXin Li void RenameExportedFiles(const std::string& existingSerializedFileName,
698*3e777be0SXin Li                          const std::string& existingDotFileName,
699*3e777be0SXin Li                          const std::string& dumpDir,
700*3e777be0SXin Li                          const armnn::NetworkId networkId)
701*3e777be0SXin Li {
702*3e777be0SXin Li     if (dumpDir.empty())
703*3e777be0SXin Li     {
704*3e777be0SXin Li         return;
705*3e777be0SXin Li     }
706*3e777be0SXin Li     RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
707*3e777be0SXin Li     RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
708*3e777be0SXin Li }
709*3e777be0SXin Li 
RenameFile(const std::string & existingName,const std::string & extension,const std::string & dumpDir,const armnn::NetworkId networkId)710*3e777be0SXin Li void RenameFile(const std::string& existingName,
711*3e777be0SXin Li                 const std::string& extension,
712*3e777be0SXin Li                 const std::string& dumpDir,
713*3e777be0SXin Li                 const armnn::NetworkId networkId)
714*3e777be0SXin Li {
715*3e777be0SXin Li     if (existingName.empty() || dumpDir.empty())
716*3e777be0SXin Li     {
717*3e777be0SXin Li         return;
718*3e777be0SXin Li     }
719*3e777be0SXin Li 
720*3e777be0SXin Li     fs::path dumpPath = dumpDir;
721*3e777be0SXin Li     const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
722*3e777be0SXin Li     int iRet = rename(existingName.c_str(), newFileName.c_str());
723*3e777be0SXin Li     if (iRet != 0)
724*3e777be0SXin Li     {
725*3e777be0SXin Li         std::stringstream ss;
726*3e777be0SXin Li         ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
727*3e777be0SXin Li            << std::to_string(errno) << " : " << std::strerror(errno);
728*3e777be0SXin Li         ALOGW(ss.str().c_str());
729*3e777be0SXin Li     }
730*3e777be0SXin Li }
731*3e777be0SXin Li 
CommitPools(std::vector<::android::nn::RunTimePoolInfo> & memPools)732*3e777be0SXin Li void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
733*3e777be0SXin Li {
734*3e777be0SXin Li     if (memPools.empty())
735*3e777be0SXin Li     {
736*3e777be0SXin Li         return;
737*3e777be0SXin Li     }
738*3e777be0SXin Li     // Commit output buffers.
739*3e777be0SXin Li     // Note that we update *all* pools, even if they aren't actually used as outputs -
740*3e777be0SXin Li     // this is simpler and is what the CpuExecutor does.
741*3e777be0SXin Li     for (auto& pool : memPools)
742*3e777be0SXin Li     {
743*3e777be0SXin Li         // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
744*3e777be0SXin Li         // update() has been removed and flush() added.
745*3e777be0SXin Li #if defined(ARMNN_ANDROID_R) || defined(ARMNN_ANDROID_S) // Use the new Android implementation.
746*3e777be0SXin Li         pool.flush();
747*3e777be0SXin Li #else
748*3e777be0SXin Li         pool.update();
749*3e777be0SXin Li #endif
750*3e777be0SXin Li     }
751*3e777be0SXin Li }
752*3e777be0SXin Li 
GetSize(const V1_0::Request & request,const V1_0::RequestArgument & requestArgument)753*3e777be0SXin Li size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument)
754*3e777be0SXin Li {
755*3e777be0SXin Li     return request.pools[requestArgument.location.poolIndex].size();
756*3e777be0SXin Li }
757*3e777be0SXin Li 
#ifdef ARMNN_ANDROID_NN_V1_3
// Returns the size in bytes of the memory pool referenced by the request
// argument. Pools that are not plain hidlMemory are reported as size 0.
size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument)
{
    const auto& memoryPool = request.pools[requestArgument.location.poolIndex];
    if (memoryPool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
    {
        return 0;
    }
    return memoryPool.hidlMemory().size();
}
#endif
772*3e777be0SXin Li 
773*3e777be0SXin Li template <typename ErrorStatus, typename Request>
ValidateRequestArgument(const Request & request,const armnn::TensorInfo & tensorInfo,const V1_0::RequestArgument & requestArgument,std::string descString)774*3e777be0SXin Li ErrorStatus ValidateRequestArgument(const Request& request,
775*3e777be0SXin Li                                     const armnn::TensorInfo& tensorInfo,
776*3e777be0SXin Li                                     const V1_0::RequestArgument& requestArgument,
777*3e777be0SXin Li                                     std::string descString)
778*3e777be0SXin Li {
779*3e777be0SXin Li     if (requestArgument.location.poolIndex >= request.pools.size())
780*3e777be0SXin Li     {
781*3e777be0SXin Li         std::string err = fmt::format("Invalid {} pool at index {} the pool index is greater than the number "
782*3e777be0SXin Li                                       "of available pools {}",
783*3e777be0SXin Li                                       descString, requestArgument.location.poolIndex, request.pools.size());
784*3e777be0SXin Li         ALOGE(err.c_str());
785*3e777be0SXin Li         return ErrorStatus::GENERAL_FAILURE;
786*3e777be0SXin Li     }
787*3e777be0SXin Li     const size_t size = GetSize(request, requestArgument);
788*3e777be0SXin Li     size_t totalLength = tensorInfo.GetNumBytes();
789*3e777be0SXin Li 
790*3e777be0SXin Li     if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
791*3e777be0SXin Li     {
792*3e777be0SXin Li         std::string err = fmt::format("Invalid {} pool at index {} the offset {} and length {} are greater "
793*3e777be0SXin Li                                       "than the pool size {}", descString, requestArgument.location.poolIndex,
794*3e777be0SXin Li                                       requestArgument.location.offset, totalLength, size);
795*3e777be0SXin Li         ALOGE(err.c_str());
796*3e777be0SXin Li         return ErrorStatus::GENERAL_FAILURE;
797*3e777be0SXin Li     }
798*3e777be0SXin Li     return ErrorStatus::NONE;
799*3e777be0SXin Li }
800*3e777be0SXin Li 
// Explicit instantiations of ValidateRequestArgument for the HAL request
// versions the driver is built against, so the definition can live in this
// translation unit.
template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(
        const V1_0::Request& request,
        const armnn::TensorInfo& tensorInfo,
        const V1_0::RequestArgument& requestArgument,
        std::string descString);

#ifdef ARMNN_ANDROID_NN_V1_3
template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(
        const V1_3::Request& request,
        const armnn::TensorInfo& tensorInfo,
        const V1_0::RequestArgument& requestArgument,
        std::string descString);
#endif
814*3e777be0SXin Li 
815*3e777be0SXin Li } // namespace armnn_driver
816