//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "TestUtils.hpp"

#include <armnn_delegate.hpp>
#include <DelegateTestInterpreter.hpp>
#include <armnnUtils/FloatingPointComparison.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/version.h>

#include <schema_generated.h>

#include <doctest/doctest.h>

namespace
{
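/// Builds a single-operator TfLite model in memory: one SOFTMAX or LOG_SOFTMAX
/// operator with an input and an output tensor of the given shape and type.
/// Returns the serialised flatbuffer contents.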
std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
                                           tflite::TensorType tensorType,
                                           const std::vector<int32_t>& tensorShape,
                                           float beta)
{
    using namespace tflite;
    flatbuffers::FlatBufferBuilder flatBufferBuilder;

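    // TfLite convention: buffer 0 is the empty sentinel buffer; buffers 1 and 2
    // back the input and output tensors below.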
    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));
    buffers.push_back(CreateBuffer(flatBufferBuilder));

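    // Input (index 0) and output (index 1) tensors share the same shape and type.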
    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
    tensors[0] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              1);
    tensors[1] = CreateTensor(flatBufferBuilder,
                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                      tensorShape.size()),
                              tensorType,
                              2);

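    // The operator reads tensor 0 and writes tensor 1.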
    const std::vector<int32_t> operatorInputs({0});
    const std::vector<int32_t> operatorOutputs({1});

    flatbuffers::Offset<Operator> softmaxOperator;
    flatbuffers::Offset<flatbuffers::String> modelDescription;
    flatbuffers::Offset<OperatorCode> operatorCode;

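    // Build the operator, its builtin options and the matching OperatorCode for
    // the requested softmax variant.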
    switch (softmaxOperatorCode)
    {
        case tflite::BuiltinOperator_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_SoftmaxOptions,
                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_SOFTMAX);
            break;
        case tflite::BuiltinOperator_LOG_SOFTMAX:
            softmaxOperator =
                CreateOperator(flatBufferBuilder,
                               0,
                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
                               BuiltinOptions_LogSoftmaxOptions,
                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
            operatorCode = CreateOperatorCode(flatBufferBuilder,
                                              tflite::BuiltinOperator_LOG_SOFTMAX);
            break;
        default:
            break;
    }
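    // Wire the tensors and the single operator into a one-subgraph model.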
    const std::vector<int32_t> subgraphInputs({0});
    const std::vector<int32_t> subgraphOutputs({1});
    flatbuffers::Offset<SubGraph> subgraph =
        CreateSubGraph(flatBufferBuilder,
                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
    flatbuffers::Offset<Model> flatbufferModel =
        CreateModel(flatBufferBuilder,
                    TFLITE_SCHEMA_VERSION,
                    flatBufferBuilder.CreateVector(&operatorCode, 1),
                    flatBufferBuilder.CreateVector(&subgraph, 1),
                    modelDescription,
                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
}

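/// Runs the generated model through a plain TfLite interpreter and through one
/// with the Arm NN delegate applied, then checks both outputs against each
/// other and against the expected values.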
void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
                 tflite::TensorType tensorType,
                 std::vector<armnn::BackendId>& backends,
                 std::vector<int32_t>& shape,
                 std::vector<float>& inputValues,
                 std::vector<float>& expectedOutputValues,
                 float beta = 0)
{
    using namespace delegateTestInterpreter;
    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
                                                             tensorType,
                                                             shape,
                                                             beta);

    // Setup interpreter with just TFLite Runtime.
    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> tfLiteOutputShape = tfLiteInterpreter.GetOutputShape(0);

    // Setup interpreter with Arm NN Delegate applied.
    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
    std::vector<float> armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
    std::vector<int32_t> armnnOutputShape = armnnInterpreter.GetOutputShape(0);

    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);

    tfLiteInterpreter.Cleanup();
    armnnInterpreter.Cleanup();
}

/// Convenience function to run softmax and log-softmax test cases.
/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
/// \param backends Arm NN backends to target
/// \param beta scaling applied to the input before the (log-)softmax is taken
/// \param expectedOutput expected result of applying the operator to the fixed test input
void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
                     std::vector<armnn::BackendId> backends,
                     float beta,
                     std::vector<float> expectedOutput)
{
    std::vector<float> input = {
        1.0, 2.5, 3.0, 4.5, 5.0,
        -1.0, -2.5, -3.0, -4.5, -5.0};
    std::vector<int32_t> shape = {2, 5};

    SoftmaxTest(operatorCode,
                tflite::TensorType_FLOAT32,
                backends,
                shape,
                input,
                expectedOutput,
                beta);
}
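
// A minimal usage sketch (illustrative only; the backend choice and expected
// values below are assumptions, real test cases compute them offline):
//
// TEST_CASE("Softmax_Standalone_Test")
// {
//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
//     // Expected values are softmax(beta * input) for the fixed input above.
//     std::vector<float> expectedOutput = { /* ... */ };
//     SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1.0f, expectedOutput);
// }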

} // anonymous namespace