/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <ftw.h>
#include <gtest/gtest.h>
#include <unistd.h>

#include <algorithm>
#include <cassert>
#include <cmath>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <thread>
#include <utility>
#include <vector>

#include "AndroidVersionUtil.h"
#include "GeneratedTestUtils.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksTypes.h"
#include "TestHarness.h"
#include "TestNeuralNetworksWrapper.h"
#include "TestUtils.h"

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-parameter"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#pragma clang diagnostic pop

#ifdef NNTEST_CTS
#define NNTEST_COMPUTE_MODE
#endif
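// Note: NNTEST_COMPUTE_MODE appears to opt this file in to
// TestNeuralNetworksWrapper's selectable compute modes, which execute()
// below relies on when requesting a synchronous compute.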

namespace android::nn::generated_tests {
using namespace test_wrapper;
using namespace test_helper;

class CompatibilityLayerGeneratedTests : public GeneratedTestBase {
  protected:
    void SetUp() override;
    void TearDown() override;

    // Test driver for tests generated from
    // packages/modules/NeuralNetworks/runtime/test/specs.
    void execute(const TestModel& testModel);

    bool mTestDynamicOutputShape = false;
    bool mTestSupported = true;
};

class CompatibilityLayerGeneratedTestsSupported : public CompatibilityLayerGeneratedTests {};
class CompatibilityLayerGeneratedTestsUnsupported : public CompatibilityLayerGeneratedTests {};
class CompatibilityLayerGeneratedTestsDynamicOutput : public CompatibilityLayerGeneratedTests {};

void CompatibilityLayerGeneratedTests::execute(const TestModel& testModel) {
    GeneratedModel model;
    createModel(testModel, mTestDynamicOutputShape, &model);
    if (testModel.expectFailure && !model.isValid()) {
        return;
    }
    ASSERT_EQ(model.finish(), Result::NO_ERROR);
    ASSERT_TRUE(model.isValid());

    Compilation compilation(&model);
    Result result = compilation.finish();
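    // Models routed to the "unsupported" suite may legitimately fail to
    // compile, so any error is tolerated there; supported models must compile.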
    if (!mTestSupported && result != Result::NO_ERROR) return;
    ASSERT_EQ(result, Result::NO_ERROR);

    Execution execution(&compilation);

    // Model inputs.
    for (uint32_t i = 0; i < testModel.main.inputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.inputIndexes[i]];
        ASSERT_EQ(Result::NO_ERROR,
                  execution.setInput(i, operand.data.get<void>(), operand.data.size()));
    }

    // Model outputs.
    std::vector<TestBuffer> outputs;
    for (uint32_t i = 0; i < testModel.main.outputIndexes.size(); i++) {
        const auto& operand = testModel.main.operands[testModel.main.outputIndexes[i]];
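        // Allocate at least one byte so setOutput always receives a valid
        // buffer, even when the operand reports a data size of zero.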
        const size_t bufferSize = std::max<size_t>(operand.data.size(), 1);
        outputs.emplace_back(bufferSize);

        ASSERT_EQ(Result::NO_ERROR,
                  execution.setOutput(i, outputs.back().getMutable<void>(), bufferSize));
    }

    result = execution.compute(Execution::ComputeMode::SYNC);
    ASSERT_EQ(result, Result::NO_ERROR);

    // If a conv filter under/overflows, compatibleTest() reports the model as
    // unsupported, but the actual conversion still returns NO_ERROR because
    // the under/overflow is treated as a warning rather than an error. Because
    // of the accuracy loss, we should not check test results in such a case.
    //
    // TODO(b/237410741): A potentially better approach is to have
    // compatibleTest() report one of three statuses: fully supported,
    // supported with accuracy loss, and not supported.
    if (mTestSupported) {
        checkResults(testModel, outputs);
    }
}

void CompatibilityLayerGeneratedTests::SetUp() {
    GeneratedTestBase::SetUp();
}

void CompatibilityLayerGeneratedTests::TearDown() {
    GeneratedTestBase::TearDown();
}

namespace {

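// Returns true when the compatibility layer is expected to handle testModel
// without accuracy loss: every operation and operand type must be on the
// supported lists below, conv/depthwise-conv filters must be constant and in
// NHWC layout, and quantized filters must survive symmetrization unchanged.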
bool compatibleTest(const TestModel& testModel) {
    static const std::vector<TestOperationType> kSupportedOperationTypes{
            TestOperationType::CONV_2D, TestOperationType::ADD,
            TestOperationType::DEPTHWISE_CONV_2D, TestOperationType::LOGISTIC};
    static const std::vector<TestOperandType> kSupportedOperandTypes{
            TestOperandType::TENSOR_FLOAT32, TestOperandType::TENSOR_INT32,
            TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED, TestOperandType::BOOL,
            TestOperandType::INT32};

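    // Only the main subgraph is inspected below, so reject any model that
    // references other subgraphs through IF/WHILE control flow.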
    if (testModel.hasControlFlow()) {
        return false;
    }

    bool result = true;
    const TestSubgraph& mainSubgraph = testModel.main;

    result &= std::all_of(
            mainSubgraph.operations.begin(), mainSubgraph.operations.end(),
            [&mainSubgraph](const TestOperation& operation) {
                bool isOperationCompatible = true;
                // Ensure that tensors are NHWC and that the filter is constant.
                if (operation.type == TestOperationType::CONV_2D ||
                    operation.type == TestOperationType::DEPTHWISE_CONV_2D) {
                    size_t implicitIsNchwIdx =
                            (operation.type == TestOperationType::CONV_2D) ? 7 : 8;
                    size_t explicitIsNchwIdx = implicitIsNchwIdx + 3;
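                    // CONV_2D takes its optional NCHW layout flag as input 7
                    // with implicit padding and input 10 with explicit padding;
                    // DEPTHWISE_CONV_2D's extra depth_multiplier input shifts
                    // these to 8 and 11. Implicit padding is detected by a
                    // BOOL (the layout flag) at the implicit-padding position.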
                    bool isImplicitPadding =
                            operation.inputs.size() <= implicitIsNchwIdx ||
                            mainSubgraph.operands[operation.inputs[implicitIsNchwIdx]].type ==
                                    TestOperandType::BOOL;
                    size_t isNchwIdx = isImplicitPadding ? implicitIsNchwIdx : explicitIsNchwIdx;

                    if (operation.inputs.size() > static_cast<uint32_t>(isNchwIdx)) {
                        isOperationCompatible &=
                                !(*mainSubgraph.operands[operation.inputs[isNchwIdx]]
                                          .data.get<bool>());
                    }

                    const int kFilterIdx = 1;
                    const TestOperand& filterOperand =
                            mainSubgraph.operands[operation.inputs[kFilterIdx]];
                    TestOperandLifeTime filterLifetime = filterOperand.lifetime;
                    isOperationCompatible &=
                            (filterLifetime == TestOperandLifeTime::CONSTANT_COPY) ||
                            (filterLifetime == TestOperandLifeTime::CONSTANT_REFERENCE);

                    // Check that making the filter operand symmetric does not
                    // under/overflow. If it did, the converted operand values
                    // would change and the model outputs would differ from the
                    // expected results.
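                    // For example, with zeroPoint = 1, a quantized filter
                    // value of -128 would symmetrize to -128 - 1 = -129,
                    // which is below the int8 minimum and would saturate.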
                    if (filterOperand.type == TestOperandType::TENSOR_QUANT8_ASYMM_SIGNED) {
                        const int8_t* data = filterOperand.data.get<int8_t>();
                        size_t dataSize = filterOperand.data.size();

                        for (int32_t i = 0; i < static_cast<int32_t>(dataSize); i++) {
                            int32_t newValue =
                                    static_cast<int32_t>(data[i]) - filterOperand.zeroPoint;
                            if (newValue < std::numeric_limits<int8_t>::min() ||
                                newValue > std::numeric_limits<int8_t>::max()) {
                                isOperationCompatible = false;
                                break;
                            }
                        }
                    }
                }

                isOperationCompatible &=
                        std::find(kSupportedOperationTypes.begin(), kSupportedOperationTypes.end(),
                                  operation.type) != kSupportedOperationTypes.end();

                return isOperationCompatible;
            });

    result &= std::all_of(mainSubgraph.operands.begin(), mainSubgraph.operands.end(),
                          [](const TestOperand& operand) {
                              return std::find(kSupportedOperandTypes.begin(),
                                               kSupportedOperandTypes.end(),
                                               operand.type) != kSupportedOperandTypes.end();
                          });

    return result;
}

}  // namespace

TEST_P(CompatibilityLayerGeneratedTestsSupported, CompatibilityLayerSupported) {
    mTestSupported = true;
    execute(testModel);
}

TEST_P(CompatibilityLayerGeneratedTestsUnsupported, CompatibilityLayerUnsupported) {
    mTestSupported = false;
    execute(testModel);
}

TEST_P(CompatibilityLayerGeneratedTestsDynamicOutput, CompatibilityLayerDynamicOutput) {
    mTestDynamicOutputShape = true;
    mTestSupported = false;
    execute(testModel);
}

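// Partition the generated models into the three suites: models that
// compatibleTest() accepts run with full result checking, models it rejects
// run without result checking, and models without scalar outputs also run
// with dynamic output shapes.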
INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsSupported,
                           [](const TestModel& testModel) {
                               return !testModel.expectFailure && compatibleTest(testModel);
                           });

INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsUnsupported,
                           [](const TestModel& testModel) {
                               return !testModel.expectFailure && !compatibleTest(testModel);
                           });

INSTANTIATE_GENERATED_TEST(CompatibilityLayerGeneratedTestsDynamicOutput,
                           [](const TestModel& testModel) {
                               return !testModel.expectFailure && !testModel.hasScalarOutputs();
                           });

}  // namespace android::nn::generated_tests