//
// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "Utils.hpp"
#include "Half.hpp"

#include <armnnSerializer/ISerializer.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <armnnUtils/Permute.hpp>

#include <armnn/Utils.hpp>
#include <log/log.h>

#include <algorithm>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iterator>
#include <span>
#include <sstream>
#include <string>
#include <time.h>

using namespace android;
using namespace android::hardware;
using namespace android::hidl::memory::V1_0;

namespace armnn_driver
{
const armnn::PermutationVector g_DontPermute{};

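// Permutes a 4D Android NN tensor into the layout ArmNN expects, updating
// tensorInfo in place and writing the rearranged data to output. As a sketch,
// an NHWC-to-NCHW conversion would pass mappings of { 0, 2, 3, 1 }; the
// actual permutation vectors used by the driver are chosen at each call site.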
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo, const void* input, void* output,
                                     const armnn::PermutationVector& mappings)
{
    if (tensorInfo.GetNumDimensions() != 4U)
    {
        throw armnn::InvalidArgumentException("NumDimensions must be 4");
    }
    armnn::DataType dataType = tensorInfo.GetDataType();
    switch (dataType)
    {
    case armnn::DataType::Float16:
    case armnn::DataType::Float32:
    case armnn::DataType::QAsymmU8:
    case armnn::DataType::QSymmS16:
    case armnn::DataType::QSymmS8:
    case armnn::DataType::QAsymmS8:
        // First swizzle the tensor info...
        tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
        // ...then swizzle the tensor data to match.
        armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
        break;
    default:
        throw armnn::InvalidArgumentException("Unknown DataType for swizzling");
    }
}

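// Builds a per-dimension "is specified" flag list for a TensorShape: a
// dimension of 0 in the NN model means "size unknown". E.g. dimensions
// { 2, 0, 3 } yield specificity { true, false, true }.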
template<typename Dimensions>
auto GetDimensionsSpecificity(const Dimensions& dimensions)
{
    // We can't use std::vector<bool> since that is a specialization that packs
    // bits, so use a string of bools instead. This also has the benefit of
    // using small string optimization.
    std::basic_string<bool> specificity(dimensions.size(), false);

    for (std::size_t i = 0; i < dimensions.size(); ++i)
    {
        specificity[i] = dimensions.data()[i] != 0;
    }

    return specificity;
}

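// Resolves a DataLocation (pool index + byte offset) to a raw pointer inside
// one of the request's mapped memory pools. Throws if the pool index is out
// of range; the offset itself is validated elsewhere (see
// ValidateRequestArgument below).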
void* GetMemoryFromPool(V1_0::DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // Find the location within the pool.
    if (location.poolIndex >= memPools.size())
    {
        throw armnn::InvalidArgumentException("The poolIndex is out of range of the memPools.");
    }

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    uint8_t* memPoolBuffer = memPool.getBuffer();

    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}

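// Converts a HAL V1_0 operand into an armnn::TensorInfo, mapping the operand
// type to the corresponding ArmNN data type and carrying over the per-tensor
// quantization scale and zero point.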
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
{
    using namespace armnn;
    DataType type;

    switch (operand.type)
    {
        case V1_0::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_0::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_0::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
        ret = TensorInfo(tensorShape, type);
    }

    ret.SetQuantizationScale(operand.scale);
    ret.SetQuantizationOffset(operand.zeroPoint);

    return ret;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

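// V1_2 adds more data types plus per-channel (per-axis) quantization: for
// TENSOR_QUANT8_SYMM_PER_CHANNEL the scales and the quantized dimension are
// read from the operand's channelQuant extra parameters instead of the
// per-tensor scale/zeroPoint fields.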
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;

    DataType type;
    switch (operand.type)
    {
        case V1_2::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_2::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_2::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_2::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
        ret = TensorInfo(tensorShape, type);
    }

    if (perChannel)
    {
        if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
        {
            throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
        }

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }

    return ret;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

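// V1_3 additionally handles the scalar INT32 operand type and the signed
// asymmetric TENSOR_QUANT8_ASYMM_SIGNED type; otherwise it mirrors the V1_2
// conversion above.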
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar   = false;

    DataType type;
    switch (operand.type)
    {
        case V1_3::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_3::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_3::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_3::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case V1_3::OperandType::INT32:
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            auto dimensionsSpecificity = GetDimensionsSpecificity(operand.dimensions);
            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity.data());
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant.
        if (operand.extraParams.getDiscriminator() != V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant)
        {
            throw armnn::InvalidArgumentException("ExtraParams is expected to be of type channelQuant");
        }
        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}

#endif

std::string GetOperandSummary(const V1_0::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

std::string GetOperandSummary(const V1_2::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

std::string GetOperandSummary(const V1_3::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

template <typename TensorType>
using DumpElementFunction = void (*)(const TensorType& tensor,
    unsigned int elementIndex,
    std::ofstream& fileStream);

namespace
{
template <typename TensorType, typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const TensorType& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << " ";
}

} // namespace

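// Writes a human-readable dump of a tensor to
// <dumpDir>/<requestName>_<tensorName>.dump. The file starts with element
// count and shape headers; the elements then follow, one line per slice of
// dimension 0.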
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
    const std::string& requestName,
    const std::string& tensorName,
    const TensorType& tensor)
{
    // The dump directory must exist in advance.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");

    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    DumpElementFunction<TensorType> dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            // Print as uint32_t so the values appear as numbers rather than characters.
            dumpElementFunction = &DumpTensorElement<TensorType, uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<TensorType, bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();
        const armnn::TensorShape shape = tensor.GetShape();

        if (!shape.AreAllDimensionsSpecified())
        {
            fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
            return;
        }
        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;

        if (numDimensions == 0)
        {
            fileStream << "# Shape []" << std::endl;
            return;
        }
        fileStream << "# Shape [" << shape[0];
        for (unsigned int d = 1; d < numDimensions; ++d)
        {
            fileStream << "," << shape[d];
        }
        fileStream << "]" << std::endl;
        fileStream << "Each line contains the data of one element of dimension 0. In NCHW and NHWC, each line"
                      " will be a batch" << std::endl << std::endl;

        // Split will create a new line after all elements of the first dimension
        // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements).
        unsigned int split = 1;
        if (numDimensions == 1)
        {
            split = shape[0];
        }
        else
        {
            for (unsigned int i = 1; i < numDimensions; ++i)
            {
                split *= shape[i];
            }
        }

        // Print all elements in the tensor.
        for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
        {
            (*dumpElementFunction)(tensor, elementIndex, fileStream);

            if ((elementIndex + 1) % split == 0)
            {
                fileStream << std::endl;
            }
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
            << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
}

template void DumpTensor<armnn::ConstTensor>(const std::string& dumpDir,
                                             const std::string& requestName,
                                             const std::string& tensorName,
                                             const armnn::ConstTensor& tensor);

template void DumpTensor<armnn::Tensor>(const std::string& dumpDir,
                                        const std::string& requestName,
                                        const std::string& tensorName,
                                        const armnn::Tensor& tensor);

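// Writes the ArmNN profiler output for a network to
// <dumpDir>/<networkId>_profiling.json, but only when GPU profiling is
// enabled and a dump directory has been configured.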
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    if (!profiler)
    {
        ALOGW("profiler was null");
        return;
    }

    // Set the name of the output profiling file.
    fs::path dumpPath = dumpDir;
    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

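// Serializes the optimized network graph to a timestamped .dot file in
// dumpDir (e.g. for inspection with Graphviz) and returns the file name, or
// an empty string if nothing was written.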
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir)
{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fs::path dumpPath = dumpDir;
    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
    fileName = tempFilePath.string();

    ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
    return fileName;
}

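// Serializes a network for the compilation caching path and/or to a
// timestamped .armnn file in dumpDir. The serialized bytes are appended to
// dataCacheData when data caching is active; the returned string is the dump
// file name, or empty if no file was written.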
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive)
{
    std::string fileName;
    std::string timestamp;
    bool bSerializeToFile = !dumpDir.empty();
    if (bSerializeToFile)
    {
        // Compute the timestamp once and reuse it for the file name below.
        timestamp = GetFileTimestamp();
        if (timestamp.empty())
        {
            bSerializeToFile = false;
        }
    }
    if (!bSerializeToFile && !dataCachingActive)
    {
        return fileName;
    }

    auto serializer(armnnSerializer::ISerializer::Create());
    // Serialize the network.
    serializer->Serialize(network);
    if (dataCachingActive)
    {
        std::stringstream stream;
        auto serialized = serializer->SaveSerializedToStream(stream);
        if (serialized)
        {
            std::string const serializedString{stream.str()};
            std::copy(serializedString.begin(), serializedString.end(), std::back_inserter(dataCacheData));
        }
    }

    if (bSerializeToFile)
    {
        // Set the name of the output .armnn file.
        fs::path dumpPath = dumpDir;
        fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
        fileName = tempFilePath.string();

        // Save the serialized network to a file.
        std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
        auto serialized = serializer->SaveSerializedToStream(serializedFile);
        if (!serialized)
        {
            ALOGW("An error occurred when serializing to file %s", fileName.c_str());
        }
    }
    return fileName;
}

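// A tensor is "dynamic" if its shape is not fully known at model preparation
// time: dimensionality not specified, zero dimensions (the TensorShape
// default), or at least one unspecified dimension.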
bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        return true;
    }
    // Account for the usage of the TensorShape empty constructor.
    if (tensorInfo.GetNumDimensions() == 0)
    {
        return true;
    }
    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}

bool AreDynamicTensorsSupported()
{
#if defined(ARMNN_ANDROID_NN_V1_3)
    return true;
#else
    return false;
#endif
}

bool isQuantizedOperand(const V1_0::OperandType& operandType)
{
    return operandType == V1_0::OperandType::TENSOR_QUANT8_ASYMM;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
bool isQuantizedOperand(const V1_2::OperandType& operandType)
{
    return operandType == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_2::OperandType::TENSOR_QUANT8_SYMM ||
           operandType == V1_2::OperandType::TENSOR_QUANT16_SYMM;
}
#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
bool isQuantizedOperand(const V1_3::OperandType& operandType)
{
    return operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_SYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT16_SYMM ||
           operandType == V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED;
}
#endif

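// Returns a "<seconds>_<nanoseconds>" timestamp from CLOCK_MONOTONIC_RAW, or
// an empty string if the clock could not be read.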
std::string GetFileTimestamp()
{
    // Used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files).
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << ts.tv_sec << "_" << ts.tv_nsec;
    }
    else
    {
        ALOGW("clock_gettime failed with errno %d : %s", errno, std::strerror(errno));
    }
    return ss.str();
}

void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId)
{
    if (dumpDir.empty())
    {
        return;
    }
    RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
    RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
}

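// Renames a previously exported file to "<networkId><extension>" inside
// dumpDir, so dump artifacts can be matched to a loaded network. Failures
// are logged but not fatal.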
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId)
{
    if (existingName.empty() || dumpDir.empty())
    {
        return;
    }

    fs::path dumpPath = dumpDir;
    const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
    int iRet = rename(existingName.c_str(), newFileName.c_str());
    if (iRet != 0)
    {
        std::stringstream ss;
        ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
           << errno << " : " << std::strerror(errno);
        // Pass the message as an argument, not as the format string.
        ALOGW("%s", ss.str().c_str());
    }
}

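// Flushes all mapped request pools back to their backing memory once
// execution has finished, so outputs written by ArmNN become visible to the
// client.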
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    if (memPools.empty())
    {
        return;
    }
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
#if defined(ARMNN_ANDROID_R) || defined(ARMNN_ANDROID_S) // Use the new Android implementation.
        pool.flush();
#else
        pool.update();
#endif
    }
}

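// Returns the size in bytes of the memory pool referenced by a request
// argument (zero for V1_3 memory pools that are not plain hidl memory).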
size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument)
{
    return request.pools[requestArgument.location.poolIndex].size();
}

#ifdef ARMNN_ANDROID_NN_V1_3
size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument)
{
    if (request.pools[requestArgument.location.poolIndex].getDiscriminator() ==
        V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
    {
        return request.pools[requestArgument.location.poolIndex].hidlMemory().size();
    }
    else
    {
        return 0;
    }
}
#endif

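// Validates that a request argument's pool index is in range and that its
// offset plus the tensor's byte size fits inside the pool, returning
// GENERAL_FAILURE (with a log message) otherwise.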
template <typename ErrorStatus, typename Request>
ErrorStatus ValidateRequestArgument(const Request& request,
                                    const armnn::TensorInfo& tensorInfo,
                                    const V1_0::RequestArgument& requestArgument,
                                    std::string descString)
{
    if (requestArgument.location.poolIndex >= request.pools.size())
    {
        std::string err = fmt::format("Invalid {} pool at index {}: the pool index is greater than the number "
                                      "of available pools {}",
                                      descString, requestArgument.location.poolIndex, request.pools.size());
        ALOGE("%s", err.c_str());
        return ErrorStatus::GENERAL_FAILURE;
    }
    const size_t size = GetSize(request, requestArgument);
    size_t totalLength = tensorInfo.GetNumBytes();

    if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
    {
        std::string err = fmt::format("Invalid {} pool at index {}: the offset {} and length {} are greater "
                                      "than the pool size {}", descString, requestArgument.location.poolIndex,
                                      requestArgument.location.offset, totalLength, size);
        ALOGE("%s", err.c_str());
        return ErrorStatus::GENERAL_FAILURE;
    }
    return ErrorStatus::NONE;
}

template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(
        const V1_0::Request& request,
        const armnn::TensorInfo& tensorInfo,
        const V1_0::RequestArgument& requestArgument,
        std::string descString);

#ifdef ARMNN_ANDROID_NN_V1_3
template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(
        const V1_3::Request& request,
        const armnn::TensorInfo& tensorInfo,
        const V1_0::RequestArgument& requestArgument,
        std::string descString);
#endif

} // namespace armnn_driver