//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{

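// Builds a FlatBuffer TfLite model containing a single "redefine" operator (e.g. RESHAPE).
// When useOption is true the target shape is carried in the operator's ReshapeOptions;
// otherwise it is supplied as an extra constant INT32 "shape" input tensor.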
std::vector<char> CreateRedefineTfLiteModel(
        tflite::BuiltinOperator redefineOperatorCode,
        tflite::TensorType tensorType,
        const std::vector<int32_t>& inputTensorShape,
        const std::vector<int32_t>& outputTensorShape,
        const std::vector<int32_t>& targetShape,
        bool useOption = true,
        float quantScale = 1.0f,
        int quantOffset = 0)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 0: conventional empty buffer.
    buffers.push_back(CreateBuffer(flatBufferBuilder)); // Buffer 1: backs the input tensor.

    auto quantizationParameters =
            CreateQuantizationParameters(flatBufferBuilder,
                                         0,
                                         0,
                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));

    auto inputTensor = CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                            inputTensorShape.size()),
                                    tensorType,
                                    1,
                                    flatBufferBuilder.CreateString("input"),
                                    quantizationParameters);

    std::vector<flatbuffers::Offset<Tensor>> tensors;
    std::vector<int32_t> operatorInputs;
    std::vector<int> subgraphInputs;
    flatbuffers::Offset<void> operatorBuiltinOptions;

    if (useOption)
    {
        // Target shape is encoded in ReshapeOptions; the operator has a single input tensor.
        buffers.push_back(CreateBuffer(flatBufferBuilder));
        auto outputTensor = CreateTensor(flatBufferBuilder,
                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                                 outputTensorShape.size()),
                                         tensorType,
                                         2,
                                         flatBufferBuilder.CreateString("output"),
                                         quantizationParameters);
        tensors = { inputTensor, outputTensor };
        operatorInputs = {0};
        subgraphInputs = {0};
        operatorBuiltinOptions = CreateReshapeOptions(
                flatBufferBuilder,
                flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
    }
    else
    {
        // Target shape is provided as a second, constant INT32 "shape" input tensor.
        buffers.push_back(
                CreateBuffer(flatBufferBuilder,
                             flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
                                                            sizeof(int32_t) * targetShape.size())));
        int32_t size = static_cast<int32_t>(targetShape.size());
        auto shapeTensor = CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>( { size } ),
                                        tflite::TensorType_INT32,
                                        2,
                                        flatBufferBuilder.CreateString("shape"));

        buffers.push_back(CreateBuffer(flatBufferBuilder));
        auto outputTensor = CreateTensor(flatBufferBuilder,
                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                                 outputTensorShape.size()),
                                         tensorType,
                                         3,
                                         flatBufferBuilder.CreateString("output"),
                                         quantizationParameters);

        tensors = { inputTensor, outputTensor, shapeTensor };
        operatorInputs = {0, 2};
        subgraphInputs = {0, 2};
        operatorBuiltinOptions = CreateReshapeOptions(flatBufferBuilder).Union();
    }

    // Create the single operator; builtin options are always ReshapeOptions in this helper.
    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ReshapeOptions;

    const std::vector<int32_t> operatorOutputs{1};
    flatbuffers::Offset<Operator> redefineOperator =
            CreateOperator(flatBufferBuilder,
                           0,
                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                           operatorBuiltinOptionsType,
                           operatorBuiltinOptions);

    const std::vector<int> subgraphOutputs{1};
    flatbuffers::Offset<SubGraph> subgraph =
            CreateSubGraph(flatBufferBuilder,
                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                           flatBufferBuilder.CreateVector(&redefineOperator, 1));

    flatbuffers::Offset<flatbuffers::String> modelDescription =
            flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                        redefineOperatorCode);

    flatbuffers::Offset<Model> flatbufferModel =
            CreateModel(flatBufferBuilder,
                        TFLITE_SCHEMA_VERSION,
                        flatBufferBuilder.CreateVector(&operatorCode, 1),
                        flatBufferBuilder.CreateVector(&subgraph, 1),
                        modelDescription,
                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));

    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    // Copy the serialised model out of the builder into a self-contained byte vector.
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

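// Runs the generated redefine model twice: once on the plain TfLite runtime and once with the
// Arm NN delegate applied, then checks that both runs match the expected output values and shape.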
template <typename T>
void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                  tflite::TensorType tensorType,
                  const std::vector<armnn::BackendId>& backends,
                  const std::vector<int32_t>& inputShape,
                  std::vector<int32_t>& outputShape,
                  std::vector<T>& inputValues,
                  std::vector<T>& expectedOutputValues,
                  std::vector<int32_t>& targetShape,
                  bool useOption = true,
                  float quantScale = 1.0f,
                  int quantOffset = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
                                                              tensorType,
                                                              inputShape,
                                                              outputShape,
                                                              targetShape,
                                                              useOption,
                                                              quantScale,
                                                              quantOffset);

    // Set up an interpreter that uses just the TfLite runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Set up an interpreter with the Arm NN delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<T> armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    // Both runtimes must agree with each other and with the expected reference data and shape.
    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}
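
// Illustrative usage sketch (not part of the helper): the shapes, values and the CpuRef backend
// choice below are hypothetical; the real test cases live in the per-operator test files.
//
//     std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
//     std::vector<int32_t> outputShape { 1, 4 };
//     std::vector<int32_t> targetShape { 1, 4 };
//     std::vector<float>   inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
//     std::vector<float>   expectedOutputValues = inputValues;   // RESHAPE only rearranges the shape.
//     RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
//                         tflite::TensorType_FLOAT32,
//                         { armnn::Compute::CpuRef },
//                         inputShape,
//                         outputShape,
//                         inputValues,
//                         expectedOutputValues,
//                         targetShape);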

} // anonymous namespace