//
// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

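// Builds, in memory, a minimal TFLite flatbuffer model containing a single
// PACK operator: inputTensorNum input tensors of identical shape and one
// packed output tensor.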
std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode,
                                        tflite::TensorType tensorType,
                                        std::vector<int32_t>& inputTensorShape,
                                        const std::vector<int32_t>& outputTensorShape,
                                        const int32_t inputTensorNum,
                                        unsigned int axis = 0,
                                        float quantScale = 1.0f,
                                        int quantOffset  = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

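    // The TFLite schema reserves buffer 0 as an empty sentinel; a second empty
    // buffer is added for the tensors to reference, since none of them carry
    // constant data.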
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

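    // A single set of quantization parameters is shared by every input tensor
    // and the output tensor.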
    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

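    // Input tensors occupy indices 0..inputTensorNum-1; the packed output is
    // the last tensor, at index inputTensorNum.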
    std::vector<int32_t> operatorInputs{};
    const std::vector<int32_t> operatorOutputs{inputTensorNum};
    std::vector<int> subgraphInputs{};
    const std::vector<int> subgraphOutputs{inputTensorNum};

    std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
    for (int i = 0; i < inputTensorNum; ++i)
    {
        tensors[i] = CreateTensor(flatBufferBuilder,
                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                          inputTensorShape.size()),
                                  tensorType,
                                  1,
                                  flatBufferBuilder.CreateString("input" + std::to_string(i)),
                                  quantizationParameters);

        // Register tensor i as both an operator input and a subgraph input.
        operatorInputs.push_back(i);
        subgraphInputs.push_back(i);
    }

    // Create the output tensor.
    tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                      outputTensorShape.size()),
                              tensorType,
                              0,
                              flatBufferBuilder.CreateString("output"),
                              quantizationParameters);

    // Create the PACK operator.
    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_PackOptions;
    flatbuffers::Offset<void> operatorBuiltinOptions =
            CreatePackOptions(flatBufferBuilder, inputTensorNum, axis).Union();

    flatbuffers::Offset<Operator> packOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&packOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Pack Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, packOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

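    // Finalise the buffer with the TFLite file identifier so that TFLite
    // tooling recognises the serialised buffer as a model.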
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

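// Runs the generated PACK model twice, once on the stock TFLite runtime and
// once with the Arm NN delegate applied, then checks both runs against the
// expected output values and shape.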
template <typename T>
void PackTest(tflite::BuiltinOperator packOperatorCode,
              tflite::TensorType tensorType,
              std::vector<armnn::BackendId>& backends,
              std::vector<int32_t>& inputShape,
              std::vector<int32_t>& expectedOutputShape,
              std::vector<std::vector<T>>& inputValues,
              std::vector<T>& expectedOutputValues,
              unsigned int axis = 0,
              float quantScale = 1.0f,
              int quantOffset  = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreatePackTfLiteModel(packOperatorCode,
                                                          tensorType,
                                                          inputShape,
                                                          expectedOutputShape,
                                                          static_cast<int32_t>(inputValues.size()),
                                                          axis,
                                                          quantScale,
                                                          quantOffset);

    // Set up an interpreter with just the TFLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);

    // Set input data for all input tensors.
    for (unsigned int i = 0; i < inputValues.size(); ++i)
    {
        auto inputTensorValues = inputValues[i];
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
    }

    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

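    // Both runs must agree with each other and with the expected values and shape.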
    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
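
// A minimal usage sketch with hypothetical shapes and values (the concrete
// test cases live in the corresponding Pack test source file). Packing two
// { 3, 2, 3 } inputs along axis 0 yields a { 2, 3, 2, 3 } output:
//
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     std::vector<int32_t> inputShape { 3, 2, 3 };
//     std::vector<int32_t> expectedOutputShape { 2, 3, 2, 3 };
//     std::vector<std::vector<float>> inputValues { input0, input1 };
//     std::vector<float> expectedOutputValues = /* input0 followed by input1 */;
//     PackTest<float>(tflite::BuiltinOperator_PACK,
//                     ::tflite::TensorType_FLOAT32,
//                     backends,
//                     inputShape,
//                     expectedOutputShape,
//                     inputValues,
//                     expectedOutputValues,
//                     0 /* axis */);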

} // anonymous namespace