xref: /aosp_15_r20/external/armnn/shim/sl/canonical/CanonicalUtils.hpp (revision 89c4ff92f2867872bb9e2354d150bf0c8c502810)
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once
#include <armnn/ArmNN.hpp>

#include <CpuExecutor.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <vector>
#include <string>
#include <fstream>
#include <iomanip>

namespace armnn_driver
{

using namespace android::nn;

/// Empty permutation vector used to indicate that tensor data should not be permuted.
extern const armnn::PermutationVector g_DontPermute;

/// Exception thrown when the driver encounters an operand type it cannot handle.
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    OperandType m_type;
};
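// Illustrative use (not part of this header): conversion code can signal an
// unconvertible operand with, e.g.,
//     throw UnsupportedOperand<OperandType>(operand.type);
// and the caller can catch UnsupportedOperand<OperandType> to report m_type.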

/// Swizzles tensor data in @a input according to the dimension mappings.
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensor,
                                     const void* input,
                                     void* output,
                                     const armnn::PermutationVector& mappings);
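// Note: @a tensor is taken by non-const reference, so callers should expect it to be
// updated to describe the permuted shape; g_DontPermute represents the identity mapping.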

/// Returns a pointer to a specific location in a pool.
void* GetMemoryFromPool(DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);
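// The DataLocation's poolIndex selects an entry in memPools and its offset gives the
// byte offset within that pool's mapped buffer (assumed from the NNAPI DataLocation fields).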

/// Returns the raw pointer held directly by a request argument (as opposed to a pointer
/// into one of the request's memory pools).
void* GetMemoryFromPointer(const Request::Argument& requestArg);

/// Converts an NNAPI operand into an equivalent armnn::TensorInfo.
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);
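// For quantized operand types the returned TensorInfo is expected to carry the operand's
// quantization scale and zero point as well as its shape and data type.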

/// Returns a human-readable summary (type and dimensions) of an operand.
std::string GetOperandSummary(const Operand& operand);

/// Returns true if the operand type is one of the quantized types.
bool isQuantizedOperand(const OperandType& operandType);

/// Returns a human-readable summary of a model's operands, inputs and outputs.
std::string GetModelSummary(const Model& model);

/// Dumps the contents of a tensor to a file in @a dumpDir, for debugging.
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const TensorType& tensor);

/// Writes the profiler's JSON output to a file in @a dumpDir when GPU profiling is enabled.
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

/// Exports the optimized network as a GraphViz .dot file in @a dumpDir and returns the file name.
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

/// Serializes the network, appending the result to @a dataCacheData when data caching is
/// active, and returns the name of any file written to @a dumpDir.
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive = true);
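// Assumed workflow (based on how the driver uses these helpers): the network is serialized
// and its graph exported before a NetworkId is available, and the resulting files are then
// re-keyed with RenameExportedFiles()/RenameFile() below once the ID is known.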

/// Renames the exported serialized-network and .dot graph files so that their names
/// incorporate the given network ID.
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId);

/// Renames a single exported file in @a dumpDir to a name based on @a networkId and @a extension.
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor.
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

/// Returns a timestamp string used when constructing names for dumped/exported files.
std::string GetFileTimestamp();

/// Converts an armnn::TensorInfo into the OutputShape representation used by the NNAPI runtime.
inline OutputShape ComputeShape(const armnn::TensorInfo& info)
{
    OutputShape shape;

    armnn::TensorShape tensorShape = info.GetShape();
    // Android will expect scalars as a zero dimensional tensor
    if (tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
    {
        shape.dimensions = std::vector<uint32_t>{};
    }
    else
    {
        std::vector<uint32_t> dimensions;
        const unsigned int numDims = tensorShape.GetNumDimensions();
        dimensions.resize(numDims);
        for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
        {
            dimensions[outputIdx] = tensorShape[outputIdx];
        }
        shape.dimensions = dimensions;
    }

    shape.isSufficient = true;

    return shape;
}
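// Illustrative only: how the output shapes reported back to the NNAPI runtime might be
// assembled from the output TensorInfos of a prepared model (outputTensorInfos is a
// hypothetical container, not declared in this header):
//     std::vector<OutputShape> outputShapes;
//     outputShapes.reserve(outputTensorInfos.size());
//     for (const armnn::TensorInfo& info : outputTensorInfos)
//     {
//         outputShapes.push_back(ComputeShape(info));
//     }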

/// Flushes the contents of the memory pools back to their backing (shared) memory.
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);

} // namespace armnn_driver