1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <gmock/gmock.h>
18 #include <gtest/gtest.h>
19
20 #include <algorithm>
#include <cassert>  // assert() is used by the test helpers below
21 #include <cstddef>
22 #include <cstdint>
23 #include <functional>
24 #include <iterator>
25 #include <memory>
26 #include <optional>
27 #include <set>
28 #include <sstream>
29 #include <string>
30 #include <utility>
31 #include <vector>
32
33 #include "NeuralNetworks.h"
34 #include "NeuralNetworksOEM.h"
35 #include "NeuralNetworksWrapper.h"
36
37 using namespace android::nn::wrapper;
38
39 namespace {
40
41 static const int32_t kAvailableOperandCodes[] = {ANEURALNETWORKS_FLOAT32,
42 ANEURALNETWORKS_INT32,
43 ANEURALNETWORKS_UINT32,
44 ANEURALNETWORKS_TENSOR_FLOAT32,
45 ANEURALNETWORKS_TENSOR_INT32,
46 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
47 ANEURALNETWORKS_BOOL,
48 ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
49 ANEURALNETWORKS_TENSOR_FLOAT16,
50 ANEURALNETWORKS_TENSOR_BOOL8,
51 ANEURALNETWORKS_FLOAT16,
52 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
53 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
54 ANEURALNETWORKS_TENSOR_OEM_BYTE};
55
56 ANeuralNetworksOperandType getOpType(int32_t opcode, uint32_t dimCount = 0,
57 const uint32_t* dim = nullptr) {
58 ANeuralNetworksOperandType opType = {.type = opcode,
59 .dimensionCount = dimCount,
60 .dimensions = dim,
61 .scale = 0.0,
62 .zeroPoint = 0};
63 if (opcode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
64 opcode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED ||
65 opcode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM ||
66 opcode == ANEURALNETWORKS_TENSOR_QUANT16_ASYMM ||
67 opcode == ANEURALNETWORKS_TENSOR_QUANT16_SYMM) {
68 opType.scale = 1. / 256.;
69 }
70 return opType;
71 }
72
73 struct OperandTypeWithExtraParams {
74     OperandTypeWithExtraParams(const ANeuralNetworksOperandType& operandType)
75 : operandType(operandType), channelQuant(std::nullopt), valueModel(std::nullopt) {}
76
77 ANeuralNetworksOperandType operandType;
78 std::optional<ANeuralNetworksSymmPerChannelQuantParams> channelQuant;
79 std::optional<const ANeuralNetworksModel*> valueModel;
80
81     bool operator==(const OperandTypeWithExtraParams& that) const {
82 if (operandType.type != that.operandType.type ||
83 operandType.scale != that.operandType.scale ||
84 operandType.zeroPoint != that.operandType.zeroPoint ||
85 operandType.dimensionCount != that.operandType.dimensionCount) {
86 return false;
87 }
88
89 if (channelQuant.has_value() != that.channelQuant.has_value() ||
90 (channelQuant.has_value() &&
91 (channelQuant->channelDim != that.channelQuant->channelDim ||
92 channelQuant->scaleCount != that.channelQuant->scaleCount))) {
93 return false;
94 }
95
96 if (valueModel != that.valueModel) {
97 return false;
98 }
99
100 if (operandType.dimensions) {
101 if (!that.operandType.dimensions) {
102 return false;
103 }
104 if (!std::equal(operandType.dimensions,
105 operandType.dimensions + operandType.dimensionCount,
106 that.operandType.dimensions)) {
107 return false;
108 }
109 } else {
110 if (that.operandType.dimensions) {
111 return false;
112 }
113 }
114
115 if (channelQuant.has_value()) {
116 if (channelQuant->scales) {
117 return that.channelQuant->scales &&
118 std::equal(channelQuant->scales,
119 channelQuant->scales + channelQuant->scaleCount,
120 that.channelQuant->scales);
121 } else {
122 return that.channelQuant->scales == nullptr;
123 }
124 }
125 return true;
126 }
127
128     bool operator!=(const OperandTypeWithExtraParams& that) const { return !(*this == that); }
129
130     bool operator<(const OperandTypeWithExtraParams& that) const {
131 if (operandType.type < that.operandType.type) return true;
132 if (operandType.dimensionCount < that.operandType.dimensionCount) return true;
133 return false;
134 }
135 };
136
137 // Generates valid and invalid mutations of given OperandTypeWithExtraParams
138 // instances.
139 // It is also responsible for freeing the memory allocated when creating
140 // mutations.
141 // Mutations shouldn't outlive the generating TensorRankConstraint instance.
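//
// Illustrative usage sketch (the operand below is hypothetical, not taken from
// this file); the behaviour matches the Between() tests further down:
//
//   auto constraint = TensorRankConstraint::Between(2, 4);
//   auto valid = constraint.MutationsWithValidRank({someOperand});     // ranks 2 and 4
//   auto invalid = constraint.MutationsWithInvalidRank({someOperand}); // ranks 1 and 5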
142 class TensorRankConstraint {
143 public:
144     TensorRankConstraint(const TensorRankConstraint& copyFrom) {
145         // ignoring the array of allocated dimensions
146 this->mRangeMax = copyFrom.mRangeMax;
147 this->mRangeMin = copyFrom.mRangeMin;
148 }
149
150     TensorRankConstraint& operator=(const TensorRankConstraint& copyFrom) {
151         // ignoring the array of allocated dimensions
152 this->mRangeMax = copyFrom.mRangeMax;
153 this->mRangeMin = copyFrom.mRangeMin;
154 return *this;
155 }
156
157     static TensorRankConstraint Exactly(uint32_t rank) {
158 return TensorRankConstraint(std::optional(rank), std::optional(rank));
159 }
160
161     static TensorRankConstraint AtLeast(uint32_t min) {
162 return TensorRankConstraint(std::optional(min), std::nullopt);
163 }
164
165     static TensorRankConstraint UpTo(uint32_t max) {
166 return TensorRankConstraint(std::nullopt, std::optional(max));
167 }
168
169     static TensorRankConstraint Between(uint32_t min, uint32_t max) {
170 if (min == 0) {
171 return UpTo(max);
172 }
173 return TensorRankConstraint(std::optional(min), std::optional(max));
174 }
175
176     std::set<std::vector<OperandTypeWithExtraParams>> MutationsWithValidRank(
177 const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams) {
178         // mRangeMin and mRangeMax can't both be nullopt
179 if (!mRangeMin) {
180 return {ModifyForRank(operandsTypeWithParams, 1),
181 ModifyForRank(operandsTypeWithParams, *mRangeMax)};
182 } else if (!mRangeMax) {
183 return {ModifyForRank(operandsTypeWithParams, *mRangeMin),
184 ModifyForRank(operandsTypeWithParams, *mRangeMin + 1)};
185 } else if (mRangeMax == mRangeMin) {
186 std::for_each(operandsTypeWithParams.begin(), operandsTypeWithParams.end(),
187 [this](const OperandTypeWithExtraParams& op) {
188 assert(op.operandType.dimensionCount == *mRangeMin);
189 });
190 return {operandsTypeWithParams};
191 } else {
192 return {ModifyForRank(operandsTypeWithParams, *mRangeMin),
193 ModifyForRank(operandsTypeWithParams, *mRangeMax)};
194 }
195 }
196
197     std::set<std::vector<OperandTypeWithExtraParams>> MutationsWithInvalidRank(
198 const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams) {
199 std::set<std::vector<OperandTypeWithExtraParams>> result;
200 if (mRangeMax) {
201 result.insert(ModifyForRank(operandsTypeWithParams, *mRangeMax + 1));
202 }
203 if (mRangeMin.value_or(0) > 1) {
204 result.insert(ModifyForRank(operandsTypeWithParams, *mRangeMin - 1));
205 }
206 return result;
207 }
208
209 private:
210     std::vector<OperandTypeWithExtraParams> ModifyForRank(
211 const std::vector<OperandTypeWithExtraParams>& operandsTypeWithParams,
212 uint32_t newRank) {
213 std::vector<OperandTypeWithExtraParams> result;
214 std::transform(operandsTypeWithParams.cbegin(), operandsTypeWithParams.cend(),
215 std::back_inserter(result),
216 [this, newRank](const OperandTypeWithExtraParams& operandTypeWithParams) {
217 return ModifyForRank(operandTypeWithParams, newRank);
218 });
219 return result;
220 }
221
222     OperandTypeWithExtraParams ModifyForRank(
223 const OperandTypeWithExtraParams& operandTypeWithParams, uint32_t newRank) {
224 if (operandTypeWithParams.operandType.dimensionCount == newRank) {
225 return operandTypeWithParams;
226 }
227
228 uint32_t* resultDimensions = nullptr;
229 if (newRank != 0) {
230 std::unique_ptr<uint32_t[]> dimensions = std::make_unique<uint32_t[]>(newRank);
231 resultDimensions = dimensions.get();
232 mAllocatedDimensions.insert(std::move(dimensions));
233 std::fill(resultDimensions, resultDimensions + newRank, 1);
234 const auto originDims = operandTypeWithParams.operandType.dimensions;
235 if (originDims != nullptr) {
236 const int dimsToCopy =
237 std::min(operandTypeWithParams.operandType.dimensionCount, newRank);
238 std::copy(originDims, originDims + dimsToCopy, resultDimensions);
239 }
240 }
241
242 OperandTypeWithExtraParams result = operandTypeWithParams;
243 result.operandType = {
244 .type = operandTypeWithParams.operandType.type,
245 .dimensionCount = newRank,
246 .dimensions = resultDimensions,
247 .scale = operandTypeWithParams.operandType.scale,
248 .zeroPoint = operandTypeWithParams.operandType.zeroPoint,
249 };
250
251 return result;
252 }
253
254     TensorRankConstraint(const std::optional<uint32_t>& min, const std::optional<uint32_t>& max)
255 : mRangeMin(min), mRangeMax(max) {
256 if (mRangeMax.has_value()) {
257 assert(*mRangeMax >= mRangeMin.value_or(0));
258 }
259
260 assert(mRangeMax.has_value() || mRangeMin.has_value());
261 }
262
263 std::optional<uint32_t> mRangeMin;
264 std::optional<uint32_t> mRangeMax;
265 std::set<std::unique_ptr<uint32_t[]>> mAllocatedDimensions;
266 };
267
268 // Mutates a set of inputs applying the same rank constraint.
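//
// Illustrative usage sketch (op0, op1 and op2 are hypothetical operands); it
// mirrors the TensorRankMutator tests below:
//
//   TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};
//   auto mutations = mutator.ValidInputsMutations({op0, op1, op2});
//
// Only the ranks of operands 0 and 2 are rewritten (both to the same rank in
// each mutation); operand 1 is passed through unchanged.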
269 class TensorRankMutator {
270 public:
271     TensorRankMutator(const TensorRankConstraint& constraint,
272 const std::set<uint32_t>& applyToIndexes = {0})
273 : mApplyToIndexes(applyToIndexes.begin(), applyToIndexes.end()), mConstraint(constraint) {}
274
275     std::set<std::vector<OperandTypeWithExtraParams>> ValidInputsMutations(
276 const std::vector<OperandTypeWithExtraParams>& validInputs) {
277 return InputsMutations(
278 validInputs, [this](const std::vector<OperandTypeWithExtraParams>& inputsToMutate) {
279 return mConstraint.MutationsWithValidRank(inputsToMutate);
280 });
281 }
282
283     std::set<std::vector<OperandTypeWithExtraParams>> InvalidInputsMutations(
284 const std::vector<OperandTypeWithExtraParams>& validInputs) {
285 return InputsMutations(
286 validInputs, [this](const std::vector<OperandTypeWithExtraParams>& inputsToMutate) {
287 return mConstraint.MutationsWithInvalidRank(inputsToMutate);
288 });
289 }
290
291 private:
292     std::set<std::vector<OperandTypeWithExtraParams>> InputsMutations(
293 const std::vector<OperandTypeWithExtraParams>& validInputs,
294 std::function<std::set<std::vector<OperandTypeWithExtraParams>>(
295 const std::vector<OperandTypeWithExtraParams>&)>
296 operandMutator) {
297 std::for_each(mApplyToIndexes.begin(), mApplyToIndexes.end(),
298 [&validInputs](uint32_t index) { assert(index < validInputs.size()); });
299
300 std::vector<OperandTypeWithExtraParams> toMutate;
301 std::transform(mApplyToIndexes.begin(), mApplyToIndexes.end(), std::back_inserter(toMutate),
302 [&validInputs](int input_index) { return validInputs[input_index]; });
303
304         // Get a series of mutations for the operands in toMutate
305 std::set<std::vector<OperandTypeWithExtraParams>> mutatedOps = operandMutator(toMutate);
306
307         // Generate a set of mutations by replacing the mutated ops in validInputs
308 // with all the mutations in mutatedOps
309 std::set<std::vector<OperandTypeWithExtraParams>> mutatedValidInputs;
310 std::transform(
311 mutatedOps.cbegin(), mutatedOps.cend(),
312 std::inserter(mutatedValidInputs, mutatedValidInputs.begin()),
313 [this, &validInputs](const std::vector<OperandTypeWithExtraParams>& opsMutation) {
314 std::vector<OperandTypeWithExtraParams> currInputMutation(validInputs.begin(),
315 validInputs.end());
316 for (size_t i = 0; i < mApplyToIndexes.size(); i++) {
317 currInputMutation[mApplyToIndexes[i]] = opsMutation[i];
318 }
319
320 return currInputMutation;
321 });
322
323 return mutatedValidInputs;
324 }
325
326 std::vector<uint32_t> mApplyToIndexes;
327 TensorRankConstraint mConstraint;
328 };
329
330 class OperationTestBase {
331 public:
332     OperationTestBase(ANeuralNetworksOperationType opCode,
333 const std::vector<ANeuralNetworksOperandType>& validInputs,
334 const std::vector<ANeuralNetworksOperandType>& validOutputs,
335 const std::vector<TensorRankMutator>& inputRankMutators = {})
336 : mOpCode(opCode), mValidInputs(), mValidOutputs(), mInputRankMutators(inputRankMutators) {
337 for (ANeuralNetworksOperandType input : validInputs) {
338 mValidInputs.push_back(input);
339 }
340 for (ANeuralNetworksOperandType output : validOutputs) {
341 mValidOutputs.push_back(output);
342 }
343 }
344
345     void setInputSymmPerChannelQuantParams(
346 int32_t index, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant) {
347 mValidInputs[index].channelQuant = channelQuant;
348 }
349
350     void setOutputSymmPerChannelQuantParams(
351 int32_t index, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant) {
352 mValidOutputs[index].channelQuant = channelQuant;
353 }
354
355     void setInputOperandValueFromModel(int32_t index, const ANeuralNetworksModel* valueModel) {
356 mValidInputs[index].valueModel = valueModel;
357 }
358
359 // Add each operand separately and add the operation using these operands.
360     // This function does not cover the case where an operand is used multiple times.
361     int32_t addOperation(const std::vector<OperandTypeWithExtraParams>& inputs,
362 const std::vector<OperandTypeWithExtraParams>& outputs) {
363 ANeuralNetworksModel* model = nullptr;
364 ANeuralNetworksModel_create(&model);
365
366 uint32_t opIdx = 0;
367 std::vector<uint32_t> inputIds;
368 std::vector<uint32_t> outputIds;
369 for (uint32_t i = 0; i < inputs.size(); i++) {
370 ANeuralNetworksModel_addOperand(model, &inputs[i].operandType);
371 if (inputs[i].channelQuant) {
372 ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
373 model, opIdx, &inputs[i].channelQuant.value());
374 }
375 if (inputs[i].valueModel) {
376 ANeuralNetworksModel_setOperandValueFromModel(model, opIdx,
377 inputs[i].valueModel.value());
378 }
379 inputIds.push_back(opIdx++);
380 }
381 for (uint32_t i = 0; i < outputs.size(); i++) {
382 ANeuralNetworksModel_addOperand(model, &outputs[i].operandType);
383 if (outputs[i].channelQuant) {
384 ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
385 model, opIdx, &outputs[i].channelQuant.value());
386 }
387 outputIds.push_back(opIdx++);
388 }
389
390 int32_t result = ANeuralNetworksModel_addOperation(
391 model, mOpCode, static_cast<uint32_t>(inputIds.size()), inputIds.data(),
392 static_cast<uint32_t>(outputIds.size()), outputIds.data());
393 ANeuralNetworksModel_free(model);
394 return result;
395 }
396
397     void testOpsValidations() {
398 EXPECT_TRUE(testSuccess());
399 EXPECT_TRUE(testMutatingInputOperandCode());
400 EXPECT_TRUE(testMutatingInputOperandCounts());
401 EXPECT_TRUE(testMutatingOutputOperandCode());
402 EXPECT_TRUE(testMutatingOutputOperandCounts());
403 EXPECT_TRUE(testMutatingInputRanks());
404 }
405
406     void testFailure(int32_t expectedResult) {
407 int32_t result = addOperation(mValidInputs, mValidOutputs);
408 EXPECT_TRUE(expectedResult == result);
409 }
410
411     bool testSuccess() {
412 int32_t result = addOperation(mValidInputs, mValidOutputs);
413 return ANEURALNETWORKS_NO_ERROR == result;
414 }
415
416     bool testMutatingInputOperandCode() {
417 for (uint32_t i = 0; i < mValidInputs.size(); i++) {
418 // LSH_PROJECTION's second argument is allowed to have any type.
419 // This is the only operation that currently has a type that can be
420             // anything, independent of any other type. Changing the operand
421 // type to any other type will result in a valid model for
422 // LSH_PROJECTION. If this is the case, skip the test.
423 if (mOpCode == ANEURALNETWORKS_LSH_PROJECTION && i == 1) {
424 continue;
425 }
426 // RANK can have input of any type.
427 if (mOpCode == ANEURALNETWORKS_RANK) {
428 continue;
429 }
430 OperandTypeWithExtraParams newType = mValidInputs[i];
431 int32_t originalOperandCode = mValidInputs[i].operandType.type;
432 std::set<int32_t> operandTypesToSkip;
433             // Transposed conv can have either a fully quantized or a per-channel
434             // quantized filter for the quantized version of the op.
435 if ((mOpCode == ANEURALNETWORKS_TRANSPOSE_CONV_2D ||
436 mOpCode == ANEURALNETWORKS_DEPTHWISE_CONV_2D) &&
437 i == 1) {
438 if (originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
439 originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED ||
440 originalOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
441 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
442 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
443 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
444 }
445 }
446             // CAST accepts any of the supported types for any of the output types.
447 if (mOpCode == ANEURALNETWORKS_CAST && i == 0) {
448 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT16);
449 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT32);
450 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_INT32);
451 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
452 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
453 }
454             // RANDOM_MULTINOMIAL's first input can be of either float16 or
455 // float32 type while everything else has the same types.
456 if (mOpCode == ANEURALNETWORKS_RANDOM_MULTINOMIAL && i == 0) {
457 if (originalOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
458 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT32);
459 } else if (originalOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32) {
460 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_FLOAT16);
461 }
462 }
463             // DEQUANTIZE supports any of the input types below for any of the
464 // output types.
465 if (mOpCode == ANEURALNETWORKS_DEQUANTIZE && i == 0) {
466 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
467 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
468 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM);
469 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
470 }
471             // AXIS_ALIGNED_BBOX_TRANSFORM's second input can be either QUANT8_ASYMM or
472             // QUANT8_ASYMM_SIGNED.
473 if (mOpCode == ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM && i == 1) {
474 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
475 operandTypesToSkip.insert(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
476 }
477
478 for (int32_t newOperandCode : kAvailableOperandCodes) {
479 if (newOperandCode == originalOperandCode ||
480 operandTypesToSkip.find(newOperandCode) != operandTypesToSkip.end()) {
481 continue;
482 }
483                 // Switching input 7 between bool and int for 10-input CONV_2D
484                 // toggles between the valid "implicit padding with layout param"
485                 // and the valid "explicit padding without layout param" signatures.
486 if (mOpCode == ANEURALNETWORKS_CONV_2D && i == 7 && mValidInputs.size() == 10) {
487 if ((newOperandCode == ANEURALNETWORKS_INT32 &&
488 originalOperandCode == ANEURALNETWORKS_BOOL) ||
489 (newOperandCode == ANEURALNETWORKS_BOOL &&
490 originalOperandCode == ANEURALNETWORKS_INT32)) {
491 continue;
492 }
493 }
494 // QUANTIZE supports both types below and its output type does
495 // not depend on the input type.
496 if (mOpCode == ANEURALNETWORKS_QUANTIZE && i == 0 &&
497 (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
498 newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)) {
499 continue;
500 }
501
502                 // ARGMIN/MAX supports five input types and has a fixed output type.
503 if ((mOpCode == ANEURALNETWORKS_ARGMIN || mOpCode == ANEURALNETWORKS_ARGMAX) &&
504 i == 0 &&
505 (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
506 newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ||
507 newOperandCode == ANEURALNETWORKS_TENSOR_INT32 ||
508 newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
509 newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED)) {
510 continue;
511 }
512
513                 // Switching input 8 between bool and int for 11-input DEPTHWISE_CONV_2D
514                 // toggles between the valid "implicit padding with layout param"
515                 // and the valid "explicit padding without layout param" signatures.
516 if (mOpCode == ANEURALNETWORKS_DEPTHWISE_CONV_2D && i == 8 &&
517 mValidInputs.size() == 11) {
518 if ((newOperandCode == ANEURALNETWORKS_INT32 &&
519 originalOperandCode == ANEURALNETWORKS_BOOL) ||
520 (newOperandCode == ANEURALNETWORKS_BOOL &&
521 originalOperandCode == ANEURALNETWORKS_INT32)) {
522 continue;
523 }
524 }
525
526 newType.operandType.type = newOperandCode;
527 std::vector<OperandTypeWithExtraParams> inputs = mValidInputs;
528 inputs[i] = newType;
529 int32_t result = addOperation(inputs, mValidOutputs);
530 if (ANEURALNETWORKS_NO_ERROR == result) {
531 return false;
532 }
533 }
534 }
535 return true;
536 }
537
538     bool testMutatingOutputOperandCode() {
539 for (uint32_t i = 0; i < mValidOutputs.size(); i++) {
540 // LSH_PROJECTION's second argument is allowed to have any type.
541 // This is the only operation that currently has a type that can be
542             // anything, independent of any other type. Changing the operand
543 // type to any other type will result in a valid model for
544 // LSH_PROJECTION. If this is the case, skip the test.
545 if (mOpCode == ANEURALNETWORKS_LSH_PROJECTION && i == 1) {
546 continue;
547 }
548 OperandTypeWithExtraParams newType = mValidOutputs[i].operandType;
549 int32_t originalOperandCode = mValidOutputs[i].operandType.type;
550 for (int32_t newOperandCode : kAvailableOperandCodes) {
551 if (newOperandCode == originalOperandCode) {
552 continue;
553 }
554 // DEQUANTIZE's output can be either TENSOR_FLOAT16 or TENSOR_FLOAT32.
555 if (mOpCode == ANEURALNETWORKS_DEQUANTIZE &&
556 (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
557 newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)) {
558 continue;
559 }
560
561 // QUANTIZE's output can be either TENSOR_QUANT8_ASYMM or
562 // TENSOR_QUANT8_ASYMM_SIGNED.
563 if (mOpCode == ANEURALNETWORKS_QUANTIZE && i == 0 &&
564 (newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
565 newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED)) {
566 continue;
567 }
568
569                 // CAST accepts any of the supported types for any of the input types.
570 if (mOpCode == ANEURALNETWORKS_CAST && i == 0 &&
571 (newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16 ||
572 newOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ||
573 newOperandCode == ANEURALNETWORKS_TENSOR_INT32 ||
574 newOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM)) {
575 continue;
576 }
577 newType.operandType.type = newOperandCode;
578 std::vector<OperandTypeWithExtraParams> outputs = mValidOutputs;
579 outputs[i] = newType;
580 int32_t result = addOperation(mValidInputs, outputs);
581 if (ANEURALNETWORKS_NO_ERROR == result) {
582 return false;
583 }
584 }
585 }
586 return true;
587 }
588
589     bool testMutatingInputOperandCounts() {
590 uint32_t numToAdd = 5;
591         // LSTM since API 29 supports 23 and 27 inputs.
592 if (mOpCode == ANEURALNETWORKS_LSTM) {
593 numToAdd = 3;
594 }
595 std::vector<OperandTypeWithExtraParams> inputs = mValidInputs;
596 for (uint32_t i = 0; i < numToAdd; i++) {
597 inputs.push_back(inputs[0]);
598 if (ANEURALNETWORKS_NO_ERROR == addOperation(inputs, mValidOutputs)) {
599 return false;
600 }
601 }
602 return true;
603 }
604
605     bool testMutatingOutputOperandCounts() {
606         // SPLIT's number of outputs depends on the value of one of its inputs and
607         // is not checked during validation.
608 if (mOpCode == ANEURALNETWORKS_SPLIT) {
609 return true;
610 }
611 std::vector<OperandTypeWithExtraParams> outputs = mValidOutputs;
612 for (int i = 0; i < 6; i++) {
613 outputs.push_back(outputs[0]);
614 if (ANEURALNETWORKS_NO_ERROR == addOperation(mValidInputs, outputs)) {
615 if (mOpCode == ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN && i < 1) {
616 continue;
617 }
618 if (mOpCode == ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM && i < 3) {
619 continue;
620 }
621 if (mOpCode == ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN && i < 3) {
622 continue;
623 }
624 if (mOpCode == ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM && i < 5) {
625 continue;
626 }
627 return false;
628 }
629 }
630 return true;
631 }
632
633     bool testMutatingInputRanks() {
634 for (auto& rankMutator : mInputRankMutators) {
635 for (const auto& validMutation : rankMutator.ValidInputsMutations(mValidInputs)) {
636 int32_t result = addOperation(validMutation, mValidOutputs);
637 if (ANEURALNETWORKS_NO_ERROR != result) {
638 return false;
639 }
640 }
641
642 for (const auto& invalidMutation : rankMutator.InvalidInputsMutations(mValidInputs)) {
643 int32_t result = addOperation(invalidMutation, mValidOutputs);
644 if (ANEURALNETWORKS_NO_ERROR == result) {
645 return false;
646 }
647 }
648 }
649
650 return true;
651 }
652
653 private:
654 ANeuralNetworksOperationType mOpCode;
655 // The dimensions in the ANeuralNetworksOperandType must outlive the test object.
656 std::vector<OperandTypeWithExtraParams> mValidInputs;
657 std::vector<OperandTypeWithExtraParams> mValidOutputs;
658
659 std::vector<TensorRankMutator> mInputRankMutators;
660 };
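
// Illustrative usage sketch (the operands are hypothetical); the concrete
// per-operation tests below follow this pattern:
//
//   OperationTestBase test(ANEURALNETWORKS_FLOOR, {input}, {output});
//   test.testOpsValidations();  // success case plus operand-code, operand-count
//                               // and rank mutations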
661
662 std::ostream& operator<<(std::ostream& os, const OperandTypeWithExtraParams& operand) {
663 const auto& operandType = operand.operandType;
664 os << "{ operand_type: { type: " << operandType.type << ", "
665 << "dimensionCount: " << operandType.dimensionCount << ", dimensions: [";
666 std::for_each(operandType.dimensions, operandType.dimensions + operandType.dimensionCount,
667 [&os](uint32_t dimension) { os << dimension << ", "; });
668 os << "], scale: " << operandType.scale << ", zeroPoint: " << operandType.zeroPoint << " }";
669
670 const auto& channelQuant = operand.channelQuant;
671 if (channelQuant.has_value()) {
672 os << ", channelQuant { channelDim: " << channelQuant->channelDim
673 << ", scaleCount: " << channelQuant->scaleCount << ", scales: [";
674 std::for_each(channelQuant->scales, channelQuant->scales + channelQuant->scaleCount,
675 [&os](float scale) { os << scale << ", "; });
676 os << "] }";
677 } else {
678 os << ", channelQuant: nullopt";
679 }
680
681 if (operand.valueModel.has_value()) {
682 os << ", valueModel: " << operand.valueModel.value();
683 } else {
684 os << ", valueModel: nullopt";
685 }
686 os << "}";
687 return os;
688 }
689
690 inline OperandTypeWithExtraParams MutationWithDimensions(
691 const OperandTypeWithExtraParams& origin, const std::vector<uint32_t>& expectedDims) {
692 OperandTypeWithExtraParams expected = origin;
693 expected.operandType.dimensionCount = expectedDims.size();
694 if (expectedDims.size() == 0) {
695 expected.operandType.dimensions = nullptr;
696 } else {
697 expected.operandType.dimensions = expectedDims.data();
698 }
699 return expected;
700 }
701 std::string DescribeMutationWithDimensions(const OperandTypeWithExtraParams& origin,
702 const std::vector<uint32_t>& expectedDims) {
703 std::ostringstream osstream;
704 osstream << MutationWithDimensions(origin, expectedDims);
705 return osstream.str();
706 }
707
708 MATCHER_P2(IsMutationWithDimensions, origin, expectedDims,
709 DescribeMutationWithDimensions(origin, expectedDims)) {
710 return arg == MutationWithDimensions(origin, expectedDims);
711 }
712
713 TEST(TensorRankConstraint, ExactlyWillReturnSameInputAsValidMutation) {
714 uint32_t opDimensions[3] = {2, 2, 2};
715 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
716 .type = ANEURALNETWORKS_TENSOR_INT32,
717 .dimensionCount = 3,
718 .dimensions = opDimensions,
719 }};
720
721 auto constraint = TensorRankConstraint::Exactly(3);
722 auto validMutationSet = constraint.MutationsWithValidRank({operand});
723 ASSERT_EQ(validMutationSet.size(), 1u);
724 auto validMutations = *validMutationSet.begin();
725 ASSERT_EQ(validMutations.size(), 1u);
726 EXPECT_THAT(validMutations[0],
727 IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2})));
728 };
729
730 TEST(TensorRankConstraint, ExactlyWillFailIfValidInputHasInvalidSize) {
731 uint32_t opDimensions[2] = {2, 2};
732 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
733 .type = ANEURALNETWORKS_TENSOR_INT32,
734 .dimensionCount = 2,
735 .dimensions = opDimensions,
736 }};
737 EXPECT_DEATH(TensorRankConstraint::Exactly(3).MutationsWithValidRank({operand}),
738 ".*(A|a)ssertion.+failed.*");
739 };
740
741 TEST(TensorRankConstraint, ExactlyWillReturnTwoInvalidMutationsWithLowerAndHigherRank) {
742 uint32_t opDimensions[3] = {2, 2, 2};
743 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
744 .type = ANEURALNETWORKS_TENSOR_INT32,
745 .dimensionCount = 3,
746 .dimensions = opDimensions,
747 }};
748
749 auto constraint = TensorRankConstraint::Exactly(3);
750 auto invalidMutations = constraint.MutationsWithInvalidRank({operand});
751 ASSERT_EQ(invalidMutations.size(), 2u);
752 std::for_each(invalidMutations.begin(), invalidMutations.end(),
753 [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
754 EXPECT_EQ(mutations.size(), 1u);
755 if (mutations.size() == 1) {
756 EXPECT_THAT(
757 mutations[0],
758 ::testing::AnyOf(
759 IsMutationWithDimensions(operand,
760 std::vector<uint32_t>({2, 2})),
761 IsMutationWithDimensions(
762 operand, std::vector<uint32_t>({2, 2, 2, 1}))));
763 }
764 });
765 };
766
767 TEST(TensorRankConstraint, AtLeastWillReturnTwoValidMutationsAboveThreshold) {
768 uint32_t opDimensions[3] = {2, 2, 2};
769 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
770 .type = ANEURALNETWORKS_TENSOR_INT32,
771 .dimensionCount = 2,
772 .dimensions = opDimensions,
773 }};
774
775 auto constraint = TensorRankConstraint::AtLeast(1);
776 auto invalidMutations =
777 constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});
778 ASSERT_EQ(invalidMutations.size(), 2u);
779 std::for_each(
780 invalidMutations.begin(), invalidMutations.end(),
781 [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
782 EXPECT_EQ(mutations.size(), 1u);
783 if (mutations.size() == 1) {
784 EXPECT_THAT(mutations[0],
785 ::testing::AnyOf(IsMutationWithDimensions(
786 operand, std::vector<uint32_t>({2})),
787 IsMutationWithDimensions(
788 operand, std::vector<uint32_t>({2, 2}))));
789 }
790 });
791 }
792
793 TEST(TensorRankConstraint, AtLeastWillReturnOneInvalidMutationsBelowThreshold) {
794 uint32_t opDimensions[2] = {2, 2};
795 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
796 .type = ANEURALNETWORKS_TENSOR_INT32,
797 .dimensionCount = 2,
798 .dimensions = opDimensions,
799 }};
800
801 auto constraint = TensorRankConstraint::AtLeast(2);
802 auto invalidMutations =
803 constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
804 ASSERT_EQ(invalidMutations.size(), 1u);
805 auto invalidMutationVector = *invalidMutations.begin();
806 ASSERT_EQ(invalidMutationVector.size(), 1u);
807 ASSERT_THAT(invalidMutationVector[0],
808 IsMutationWithDimensions(operand, std::vector<uint32_t>({2})));
809 }
810
811 TEST(TensorRankConstraint, AtLeastWillReturnNoInvalidMutationsIfThresholdIs1) {
812 uint32_t opDimensions[1] = {2};
813 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
814 .type = ANEURALNETWORKS_TENSOR_INT32,
815 .dimensionCount = 1,
816 .dimensions = opDimensions,
817 }};
818
819 auto constraint = TensorRankConstraint::AtLeast(1);
820 auto invalidMutations =
821 constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
822 ASSERT_EQ(invalidMutations.size(), 0u);
823 }
824
825 TEST(TensorRankConstraint, UpToWillReturnUpToTwoValidMutationsBelowThreshold) {
826 uint32_t opDimensions[3] = {2, 2, 2};
827 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
828 .type = ANEURALNETWORKS_TENSOR_INT32,
829 .dimensionCount = 2,
830 .dimensions = opDimensions,
831 }};
832
833 auto constraint = TensorRankConstraint::UpTo(3);
834 auto invalidMutations =
835 constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});
836
837 auto expected = std::vector<uint32_t>({7, 7});
838 ASSERT_EQ(invalidMutations.size(), 2u);
839 std::for_each(invalidMutations.begin(), invalidMutations.end(),
840 [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
841 EXPECT_EQ(mutations.size(), 1u);
842 if (mutations.size() == 1) {
843 EXPECT_THAT(mutations[0],
844 ::testing::AnyOf(
845 IsMutationWithDimensions(operand,
846 std::vector<uint32_t>({2})),
847 IsMutationWithDimensions(
848 operand, std::vector<uint32_t>({2, 2, 1}))));
849 }
850 });
851 }
852
853 TEST(TensorRankConstraint, UpToWillReturnOneInvalidMutationsAboveThreshold) {
854 uint32_t opDimensions[3] = {2, 2, 2};
855 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
856 .type = ANEURALNETWORKS_TENSOR_INT32,
857 .dimensionCount = 3,
858 .dimensions = opDimensions,
859 }};
860
861 auto constraint = TensorRankConstraint::UpTo(3);
862 auto invalidMutations =
863 constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
864 ASSERT_EQ(invalidMutations.size(), 1u);
865 auto invalidMutationVector = *invalidMutations.begin();
866 ASSERT_EQ(invalidMutationVector.size(), 1u);
867 ASSERT_THAT(invalidMutationVector[0],
868 IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2, 1})));
869 }
870
871 TEST(TensorRankConstraint, BetweenWillReturnTwoValidMutationsOnRangeBoundaries) {
872 uint32_t opDimensions[3] = {2, 2, 2};
873 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
874 .type = ANEURALNETWORKS_TENSOR_INT32,
875 .dimensionCount = 3,
876 .dimensions = opDimensions,
877 }};
878
879 auto constraint = TensorRankConstraint::Between(2, 4);
880 auto validMutations = constraint.MutationsWithValidRank({(OperandTypeWithExtraParams)operand});
881 ASSERT_EQ(validMutations.size(), 2u);
882 std::for_each(validMutations.begin(), validMutations.end(),
883 [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
884 EXPECT_EQ(mutations.size(), 1u);
885 if (mutations.size() == 1) {
886 EXPECT_THAT(
887 mutations[0],
888 ::testing::AnyOf(
889 IsMutationWithDimensions(operand,
890 std::vector<uint32_t>({2, 2})),
891 IsMutationWithDimensions(
892 operand, std::vector<uint32_t>({2, 2, 2, 1}))));
893 }
894 });
895 }
896
897 TEST(TensorRankConstraint, BetweenWillReturnTwoInvValidMutationsAdjacentToRangeBoundaries) {
898 uint32_t opDimensions[3] = {2, 2, 2};
899 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
900 .type = ANEURALNETWORKS_TENSOR_INT32,
901 .dimensionCount = 3,
902 .dimensions = opDimensions,
903 }};
904
905 auto constraint = TensorRankConstraint::Between(2, 4);
906 auto validMutations =
907 constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
908 ASSERT_EQ(validMutations.size(), 2u);
909 std::for_each(
910 validMutations.begin(), validMutations.end(),
911 [&operand](const std::vector<OperandTypeWithExtraParams>& mutations) {
912 EXPECT_EQ(mutations.size(), 1u);
913 if (mutations.size() == 1) {
914 EXPECT_THAT(
915 mutations[0],
916 ::testing::AnyOf(
917 IsMutationWithDimensions(operand, std::vector<uint32_t>({2})),
918 IsMutationWithDimensions(
919 operand, std::vector<uint32_t>({2, 2, 2, 1, 1}))));
920 }
921 });
922 }
923
924 TEST(TensorRankConstraint, BetweenWillReturnOneInvalidMutationsOnlyIfLowerBoundIs1) {
925 uint32_t opDimensions[3] = {2, 2, 2};
926 OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
927 .type = ANEURALNETWORKS_TENSOR_INT32,
928 .dimensionCount = 3,
929 .dimensions = opDimensions,
930 }};
931
932 auto constraint = TensorRankConstraint::Between(1, 4);
933 auto invalidMutations =
934 constraint.MutationsWithInvalidRank({(OperandTypeWithExtraParams)operand});
935 ASSERT_EQ(invalidMutations.size(), 1u);
936 auto invalidMutationVector = *invalidMutations.begin();
937 ASSERT_EQ(invalidMutationVector.size(), 1u);
938 ASSERT_THAT(invalidMutationVector[0],
939 IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2, 1, 1})));
940 }
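
// Illustrative sketch (not part of the original suite): Between() with a zero
// lower bound delegates to UpTo(), so only one invalid mutation (rank max + 1)
// is generated.
TEST(TensorRankConstraint, BetweenWithZeroLowerBoundBehavesLikeUpToSketch) {
    uint32_t opDimensions[3] = {2, 2, 2};
    OperandTypeWithExtraParams operand{ANeuralNetworksOperandType{
            .type = ANEURALNETWORKS_TENSOR_INT32,
            .dimensionCount = 3,
            .dimensions = opDimensions,
    }};

    auto constraint = TensorRankConstraint::Between(0, 3);
    auto invalidMutations = constraint.MutationsWithInvalidRank({operand});
    ASSERT_EQ(invalidMutations.size(), 1u);
    auto invalidMutationVector = *invalidMutations.begin();
    ASSERT_EQ(invalidMutationVector.size(), 1u);
    ASSERT_THAT(invalidMutationVector[0],
                IsMutationWithDimensions(operand, std::vector<uint32_t>({2, 2, 2, 1})));
}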
941
942 TEST(TensorRankMutator, AppliesConstraintToInputsAtGivenInputsToGenerateValidMutations) {
943 uint32_t opDimensions0[2] = {0, 0};
944 OperandTypeWithExtraParams operand0{ANeuralNetworksOperandType{
945 .type = ANEURALNETWORKS_TENSOR_INT32,
946 .dimensionCount = 2,
947 .dimensions = opDimensions0,
948 }};
949 uint32_t opDimensions1[1] = {1};
950 OperandTypeWithExtraParams operand1{ANeuralNetworksOperandType{
951 .type = ANEURALNETWORKS_TENSOR_INT32,
952 .dimensionCount = 1,
953 .dimensions = opDimensions1,
954 }};
955 uint32_t opDimensions2[2] = {2, 2};
956 OperandTypeWithExtraParams operand2{ANeuralNetworksOperandType{
957 .type = ANEURALNETWORKS_TENSOR_INT32,
958 .dimensionCount = 2,
959 .dimensions = opDimensions2,
960 }};
961 TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};
962
963 const auto mutationSet = mutator.ValidInputsMutations({operand0, operand1, operand2});
964 ASSERT_EQ(mutationSet.size(), 2u);
965 std::for_each(mutationSet.begin(), mutationSet.end(),
966 [&](const std::vector<OperandTypeWithExtraParams>& mutatedInputs) {
967 EXPECT_EQ(mutatedInputs.size(), 3u);
968 if (mutatedInputs.size() == 3) {
969 EXPECT_EQ(mutatedInputs[0].operandType.dimensionCount,
970 mutatedInputs[2].operandType.dimensionCount);
971 EXPECT_THAT(mutatedInputs[0],
972 ::testing::AnyOf(
973 IsMutationWithDimensions(
974 operand0, std::vector<uint32_t>({0, 0})),
975 IsMutationWithDimensions(
976 operand0, std::vector<uint32_t>({0, 0, 1}))));
977
978 EXPECT_EQ(mutatedInputs[1], operand1);
979
980 EXPECT_THAT(mutatedInputs[2],
981 ::testing::AnyOf(
982 IsMutationWithDimensions(
983 operand2, std::vector<uint32_t>({2, 2})),
984 IsMutationWithDimensions(
985 operand2, std::vector<uint32_t>({2, 2, 1}))));
986 }
987 });
988 }
989
990 TEST(TensorRankMutator, AppliesConstraintToInputsAtGivenInputsToGenerateInvalidMutations) {
991 uint32_t opDimensions0[2] = {0, 0};
992 OperandTypeWithExtraParams operand0{ANeuralNetworksOperandType{
993 .type = ANEURALNETWORKS_TENSOR_INT32,
994 .dimensionCount = 2,
995 .dimensions = opDimensions0,
996 }};
997 uint32_t opDimensions1[1] = {1};
998 OperandTypeWithExtraParams operand1{ANeuralNetworksOperandType{
999 .type = ANEURALNETWORKS_TENSOR_INT32,
1000 .dimensionCount = 1,
1001 .dimensions = opDimensions1,
1002 }};
1003 uint32_t opDimensions2[2] = {2, 2};
1004 OperandTypeWithExtraParams operand2{ANeuralNetworksOperandType{
1005 .type = ANEURALNETWORKS_TENSOR_INT32,
1006 .dimensionCount = 2,
1007 .dimensions = opDimensions2,
1008 }};
1009 TensorRankMutator mutator{TensorRankConstraint::AtLeast(2), {0, 2}};
1010
1011 const auto mutationSet = mutator.InvalidInputsMutations({operand0, operand1, operand2});
1012 ASSERT_EQ(mutationSet.size(), 1u);
1013 std::for_each(
1014 mutationSet.begin(), mutationSet.end(),
1015 [&](const std::vector<OperandTypeWithExtraParams>& mutatedInputs) {
1016 EXPECT_EQ(mutatedInputs.size(), 3u);
1017 if (mutatedInputs.size() == 3) {
1018 EXPECT_THAT(mutatedInputs[0],
1019 IsMutationWithDimensions(operand0, std::vector<uint32_t>({0})));
1020
1021 EXPECT_EQ(mutatedInputs[1], operand1);
1022
1023 EXPECT_THAT(mutatedInputs[2],
1024 IsMutationWithDimensions(operand2, std::vector<uint32_t>({2})));
1025 }
1026 });
1027 }
1028
1029 // Test quantization parameters that are inconsistent among operands.
1030 enum class BadQuantization { NONE, zeroPoint, scale };
1031 void scramble(ANeuralNetworksOperandType* type, BadQuantization bad) {
1032 if (bad == BadQuantization::zeroPoint) {
1033 type->zeroPoint = 1;
1034 } else if (bad == BadQuantization::scale) {
1035 type->scale *= 2;
1036 }
1037 };
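
// Illustrative sketch (hypothetical operands, not taken from this file): scramble()
// is presumably applied to a copy of an otherwise valid operand so that its
// quantization parameters no longer match its peers, e.g.
//
//   ANeuralNetworksOperandType badInput = validInput;
//   scramble(&badInput, BadQuantization::scale);  // scale is now doubled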
1038
1039 void argMinMaxTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
1040 SCOPED_TRACE(inputOperandType);
1041 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1042 ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
1043 ANeuralNetworksOperandType axis = {
1044 .type = ANEURALNETWORKS_INT32,
1045 .dimensionCount = 0,
1046 .dimensions = nullptr,
1047 };
1048 uint32_t outputDimensions[3] = {2, 2, 2};
1049 ANeuralNetworksOperandType output = {
1050 .type = ANEURALNETWORKS_TENSOR_INT32,
1051 .dimensionCount = 3,
1052 .dimensions = outputDimensions,
1053 };
1054 OperationTestBase test(operationCode, {input0, axis}, {output});
1055 test.testOpsValidations();
1056 }
1057
1058 TEST(OperationValidationTest, ARGMIN) {
1059 argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_FLOAT16);
1060 argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_FLOAT32);
1061 argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_INT32);
1062 argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1063 argMinMaxTest(ANEURALNETWORKS_ARGMIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1064 }
1065
1066 TEST(OperationValidationTest, ARGMAX) {
1067 argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_FLOAT16);
1068 argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_FLOAT32);
1069 argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_INT32);
1070 argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1071 argMinMaxTest(ANEURALNETWORKS_ARGMAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1072 }
1073
1074 void dequantizeOpTest(int32_t inputOperandType, int32_t outputOperandType) {
1075 SCOPED_TRACE(testing::Message()
1076 << "inputType: " << inputOperandType << ", outputType: " << outputOperandType);
1077 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1078 ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDimensions);
1079 ANeuralNetworksOperandType output = getOpType(outputOperandType, 4, inputDimensions);
1080 OperationTestBase dequantizeTest(ANEURALNETWORKS_DEQUANTIZE, {input}, {output},
1081 {{TensorRankConstraint::UpTo(4)}});
1082 dequantizeTest.testOpsValidations();
1083 }
1084
1085 TEST(OperationValidationTest, DEQUANTIZE) {
1086 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
1087 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
1088 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
1089 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
1090 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
1091 ANEURALNETWORKS_TENSOR_FLOAT16);
1092 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
1093 ANEURALNETWORKS_TENSOR_FLOAT32);
1094 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_FLOAT16);
1095 dequantizeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_FLOAT32);
1096 }
1097
1098 void expandDimsTest(int32_t inputOperandType) {
1099 SCOPED_TRACE(inputOperandType);
1100 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1101 ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
1102 ANeuralNetworksOperandType axis = {
1103 .type = ANEURALNETWORKS_INT32,
1104 .dimensionCount = 0,
1105 .dimensions = nullptr,
1106 };
1107 uint32_t outputDimensions[5] = {2, 2, 2, 2, 2};
1108 ANeuralNetworksOperandType output = getOpType(inputOperandType, 5, outputDimensions);
1109 OperationTestBase test(ANEURALNETWORKS_EXPAND_DIMS, {input0, axis}, {output});
1110 test.testOpsValidations();
1111 }
1112
1113 TEST(OperationValidationTest, EXPAND_DIMS) {
1114 expandDimsTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1115 expandDimsTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1116 expandDimsTest(ANEURALNETWORKS_TENSOR_INT32);
1117 expandDimsTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1118 expandDimsTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1119 }
1120
1121 void gatherTest(int32_t inputOperandType) {
1122 SCOPED_TRACE(inputOperandType);
1123 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1124 ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
1125 ANeuralNetworksOperandType axis = {
1126 .type = ANEURALNETWORKS_INT32,
1127 .dimensionCount = 0,
1128 .dimensions = nullptr,
1129 };
1130 ANeuralNetworksOperandType input2 = {
1131 .type = ANEURALNETWORKS_TENSOR_INT32,
1132 .dimensionCount = 4,
1133 .dimensions = inputDimensions,
1134 };
1135 uint32_t outputDimensions[7] = {2, 2, 2, 2, 2, 2, 2};
1136 ANeuralNetworksOperandType output = getOpType(inputOperandType, 7, outputDimensions);
1137 OperationTestBase test(ANEURALNETWORKS_GATHER, {input0, axis, input2}, {output});
1138 test.testOpsValidations();
1139 }
1140
1141 TEST(OperationValidationTest, GATHER) {
1142 gatherTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1143 gatherTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1144 gatherTest(ANEURALNETWORKS_TENSOR_INT32);
1145 gatherTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1146 gatherTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1147 }
1148
1149 void quantizeOpTest(int32_t inputOperandCode, int32_t outputOperandCode) {
1150 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1151 ANeuralNetworksOperandType input = {
1152 .type = inputOperandCode, .dimensionCount = 4, .dimensions = inputDimensions};
1153 ANeuralNetworksOperandType output = {.type = outputOperandCode,
1154 .dimensionCount = 4,
1155 .dimensions = inputDimensions,
1156 .scale = 1.0f,
1157 .zeroPoint = 0};
1158 OperationTestBase test(ANEURALNETWORKS_QUANTIZE, {input}, {output});
1159 test.testOpsValidations();
1160 }
1161
1162 TEST(OperationValidationTest, QUANTIZE_float16) {
1163 quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1164 quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1165 }
1166
1167 TEST(OperationValidationTest, QUANTIZE_float32) {
1168 quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1169 quantizeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1170 }
1171
1172 TEST(OperationValidationTest, QUANTIZED_16BIT_LSTM) {
1173 uint32_t oneDimensional[1] = {5};
1174 uint32_t twoDimensional[2] = {5, 5};
1175
1176 ANeuralNetworksOperandType int32Tensor1D = {
1177 .type = ANEURALNETWORKS_TENSOR_INT32,
1178 .dimensionCount = 1,
1179 .dimensions = oneDimensional,
1180 .scale = 0.0000318,
1181 .zeroPoint = 0,
1182 };
1183 ANeuralNetworksOperandType quant8Tensor2D = {
1184 .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
1185 .dimensionCount = 2,
1186 .dimensions = twoDimensional,
1187 .scale = 0.00408021,
1188 .zeroPoint = 100,
1189 };
1190 ANeuralNetworksOperandType quant16Tensor2D = {
1191 .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
1192 .dimensionCount = 2,
1193 .dimensions = twoDimensional,
1194 .scale = 1.0 / 2048,
1195 .zeroPoint = 0,
1196 };
1197
1198 ANeuralNetworksOperandType input = quant8Tensor2D;
1199 ANeuralNetworksOperandType input_to_input_weights = quant8Tensor2D;
1200 ANeuralNetworksOperandType input_to_forget_weights = quant8Tensor2D;
1201 ANeuralNetworksOperandType input_to_cell_weights = quant8Tensor2D;
1202 ANeuralNetworksOperandType input_to_output_weights = quant8Tensor2D;
1203 ANeuralNetworksOperandType recurrent_to_input_weights = quant8Tensor2D;
1204 ANeuralNetworksOperandType recurrent_to_forget_weights = quant8Tensor2D;
1205 ANeuralNetworksOperandType recurrent_to_cell_weights = quant8Tensor2D;
1206 ANeuralNetworksOperandType recurrent_to_output_weights = quant8Tensor2D;
1207 ANeuralNetworksOperandType input_gate_bias = int32Tensor1D;
1208 ANeuralNetworksOperandType forget_gate_bias = int32Tensor1D;
1209 ANeuralNetworksOperandType cell_gate_bias = int32Tensor1D;
1210 ANeuralNetworksOperandType output_gate_bias = int32Tensor1D;
1211 ANeuralNetworksOperandType prev_cell_state = quant16Tensor2D;
1212 ANeuralNetworksOperandType prev_output = quant8Tensor2D;
1213
1214 ANeuralNetworksOperandType cell_state_out = quant16Tensor2D;
1215 ANeuralNetworksOperandType output = quant8Tensor2D;
1216
1217 OperationTestBase test(
1218 ANEURALNETWORKS_QUANTIZED_16BIT_LSTM,
1219 {input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights,
1220 input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights,
1221 recurrent_to_cell_weights, recurrent_to_output_weights, input_gate_bias,
1222 forget_gate_bias, cell_gate_bias, output_gate_bias, prev_cell_state, prev_output},
1223 {cell_state_out, output});
1224 test.testOpsValidations();
1225 }
1226
1227 void splitTest(int32_t inputOperandType) {
1228 SCOPED_TRACE(inputOperandType);
1229 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1230 ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
1231 ANeuralNetworksOperandType axis = {
1232 .type = ANEURALNETWORKS_INT32,
1233 .dimensionCount = 0,
1234 .dimensions = nullptr,
1235 };
1236 ANeuralNetworksOperandType count = {
1237 .type = ANEURALNETWORKS_INT32,
1238 .dimensionCount = 0,
1239 .dimensions = nullptr,
1240 };
1241 uint32_t outputDimensions[2] = {2, 2};
1242 ANeuralNetworksOperandType output0 = getOpType(inputOperandType, 2, outputDimensions);
1243 ANeuralNetworksOperandType output1 = getOpType(inputOperandType, 2, outputDimensions);
1244 OperationTestBase test(ANEURALNETWORKS_SPLIT, {input0, axis, count}, {output0, output1});
1245 test.testOpsValidations();
1246 }
1247
1248 TEST(OperationValidationTest, SPLIT) {
1249 splitTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1250 splitTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1251 splitTest(ANEURALNETWORKS_TENSOR_INT32);
1252 splitTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1253 splitTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1254 }
1255
1256 void tileTest(int32_t inputOperandType) {
1257 SCOPED_TRACE(inputOperandType);
1258 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1259 ANeuralNetworksOperandType input0 = getOpType(inputOperandType, 4, inputDimensions);
1260 uint32_t multiplesDimensions[1] = {4};
1261 ANeuralNetworksOperandType multiples = {
1262 .type = ANEURALNETWORKS_TENSOR_INT32,
1263 .dimensionCount = 1,
1264 .dimensions = multiplesDimensions,
1265 };
1266 uint32_t outputDimensions[8] = {2, 2, 2, 2, 2, 2, 2, 2};
1267 ANeuralNetworksOperandType output0 = getOpType(inputOperandType, 8, outputDimensions);
1268 OperationTestBase test(ANEURALNETWORKS_TILE, {input0, multiples}, {output0});
1269 test.testOpsValidations();
1270 }
1271
1272 TEST(OperationValidationTest, TILE) {
1273 tileTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1274 tileTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1275 tileTest(ANEURALNETWORKS_TENSOR_INT32);
1276 tileTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1277 tileTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1278 }
1279
1280 void topkV2Test(int32_t inputOperandType) {
1281 SCOPED_TRACE(inputOperandType);
1282 uint32_t inputDimensions[4] = {4, 5, 6, 7};
1283 ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDimensions);
1284 ANeuralNetworksOperandType k = getOpType(ANEURALNETWORKS_INT32);
1285 uint32_t outputDimensions[4] = {4, 5, 6, 3};
1286 ANeuralNetworksOperandType outputValues = getOpType(inputOperandType, 4, outputDimensions);
1287 ANeuralNetworksOperandType outputIndices =
1288 getOpType(ANEURALNETWORKS_TENSOR_INT32, 4, outputDimensions);
1289 OperationTestBase test(ANEURALNETWORKS_TOPK_V2, {input, k}, {outputValues, outputIndices});
1290 test.testOpsValidations();
1291 }
1292
1293 TEST(OperationValidationTest, TOPK_V2) {
1294 topkV2Test(ANEURALNETWORKS_TENSOR_FLOAT16);
1295 topkV2Test(ANEURALNETWORKS_TENSOR_FLOAT32);
1296 topkV2Test(ANEURALNETWORKS_TENSOR_INT32);
1297 topkV2Test(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1298 topkV2Test(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1299 }
1300
1301 void simpleMathOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1302 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1303 ANeuralNetworksOperandType input1 = getOpType(operandCode, 4, inputDimensions);
1304
1305 ANeuralNetworksOperandType input2 = input1;
1306 ANeuralNetworksOperandType output = input1;
1307 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
1308 .dimensionCount = 0,
1309 .dimensions = nullptr,
1310 .scale = 0.0f,
1311 .zeroPoint = 0};
1312
1313 OperationTestBase simpleMathTest(
1314 operationCode, {input1, input2, activation}, {output},
1315 {{TensorRankConstraint::UpTo(4), {0}}, {TensorRankConstraint::UpTo(4), {1}}});
1316 simpleMathTest.testOpsValidations();
1317 }
1318
1319 TEST(OperationValidationTest, ADD_float16) {
1320 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_FLOAT16);
1321 }
1322
TEST(OperationValidationTest, ADD_float32) {
1324 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_FLOAT32);
1325 }
1326
TEST(OperationValidationTest, ADD_quant8) {
1328 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1329 }
1330
TEST(OperationValidationTest, ADD_quant8_signed) {
1332 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1333 }
1334
TEST(OperationValidationTest, ADD_int32) {
1336 simpleMathOpTest(ANEURALNETWORKS_ADD, ANEURALNETWORKS_TENSOR_INT32);
1337 }
1338
TEST(OperationValidationTest, MUL_float16) {
1340 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_FLOAT16);
1341 }
1342
TEST(OperationValidationTest, MUL_float32) {
1344 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_FLOAT32);
1345 }
1346
TEST(OperationValidationTest, MUL_quant8) {
1348 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1349 }
1350
TEST(OperationValidationTest, MUL_quant8_signed) {
1352 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1353 }
1354
TEST(OperationValidationTest, MUL_int32) {
1356 simpleMathOpTest(ANEURALNETWORKS_MUL, ANEURALNETWORKS_TENSOR_INT32);
1357 }
1358
TEST(OperationValidationTest, SUB_float16) {
1360 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_FLOAT16);
1361 }
1362
TEST(OperationValidationTest, SUB_float32) {
1364 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_FLOAT32);
1365 }
1366
TEST(OperationValidationTest, SUB_quant8) {
1368 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1369 }
1370
TEST(OperationValidationTest, SUB_quant8_signed) {
1372 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1373 }
1374
TEST(OperationValidationTest, SUB_int32) {
1376 simpleMathOpTest(ANEURALNETWORKS_SUB, ANEURALNETWORKS_TENSOR_INT32);
1377 }
1378
TEST(OperationValidationTest, DIV_float16) {
1380 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_FLOAT16);
1381 }
1382
TEST(OperationValidationTest, DIV_float32) {
1384 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_FLOAT32);
1385 }
1386
TEST(OperationValidationTest, DIV_int32) {
1388 simpleMathOpTest(ANEURALNETWORKS_DIV, ANEURALNETWORKS_TENSOR_INT32);
1389 }
1390
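// For quantized MUL the output scale is expected to exceed the product of the two input
// scales, so an output scale of 0.5 with both input scales at 1.0 should be rejected
// with ANEURALNETWORKS_BAD_DATA.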
TEST(OperationValidationTest, MUL_quant8_bad_output_scale) {
1392 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1393 ANeuralNetworksOperandType input1 =
1394 getOpType(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, 4, inputDimensions);
1395 ANeuralNetworksOperandType input2 = input1;
1396 ANeuralNetworksOperandType output = input1;
1397 input1.scale = 1.0f;
1398 input2.scale = 1.0f;
1399 output.scale = 0.5f;
1400 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
1401 .dimensionCount = 0,
1402 .dimensions = nullptr,
1403 .scale = 0.0f,
1404 .zeroPoint = 0};
1405
1406 OperationTestBase mulTest(ANEURALNETWORKS_MUL, {input1, input2, activation}, {output});
1407 mulTest.testFailure(ANEURALNETWORKS_BAD_DATA);
1408 }
1409
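// Shared helper for element-wise binary ops that take no activation operand
// (MAXIMUM, MINIMUM), exercised here with rank-5 inputs.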
void binaryOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1411 uint32_t inputDimensions[] = {2, 2, 2, 2, 2};
1412 ANeuralNetworksOperandType input1 = getOpType(operandCode, 5, inputDimensions);
1413
1414 ANeuralNetworksOperandType input2 = input1;
1415 ANeuralNetworksOperandType output = input1;
1416
1417 OperationTestBase test(operationCode, {input1, input2}, {output});
1418 test.testOpsValidations();
1419 }
1420
TEST(OperationValidationTest, MAXIMUM_float16) {
1422 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_FLOAT16);
1423 }
1424
TEST(OperationValidationTest, MAXIMUM_float32) {
1426 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_FLOAT32);
1427 }
1428
TEST(OperationValidationTest, MAXIMUM_int32) {
1430 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_INT32);
1431 }
1432
TEST(OperationValidationTest, MAXIMUM_quant8) {
1434 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1435 }
1436
TEST(OperationValidationTest, MAXIMUM_quant8signed) {
1438 binaryOpTest(ANEURALNETWORKS_MAXIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1439 }
1440
TEST(OperationValidationTest, MINIMUM_float16) {
1442 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_FLOAT16);
1443 }
1444
TEST(OperationValidationTest, MINIMUM_float32) {
1446 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_FLOAT32);
1447 }
1448
TEST(OperationValidationTest, MINIMUM_int32) {
1450 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_INT32);
1451 }
1452
TEST(OperationValidationTest, MINIMUM_quant8) {
1454 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1455 }
1456
TEST(OperationValidationTest, MINIMUM_quant8signed) {
1458 binaryOpTest(ANEURALNETWORKS_MINIMUM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1459 }
1460
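// Shared helper for unary element-wise ops. For FLOOR, LOGISTIC, RELU, RELU1, RELU6,
// and TANH the input additionally carries a rank <= 4 constraint.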
void activationOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1462 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1463 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1464
1465 ANeuralNetworksOperandType output = input;
1466 std::vector<TensorRankMutator> inputRankMutators;
1467 if (operationCode == ANEURALNETWORKS_FLOOR || operationCode == ANEURALNETWORKS_LOGISTIC ||
1468 operationCode == ANEURALNETWORKS_RELU || operationCode == ANEURALNETWORKS_RELU1 ||
1469 operationCode == ANEURALNETWORKS_RELU6 || operationCode == ANEURALNETWORKS_TANH) {
1470 inputRankMutators.push_back({TensorRankConstraint::UpTo(4)});
1471 }
1472 OperationTestBase test(operationCode, {input}, {output}, inputRankMutators);
1473 test.testOpsValidations();
1474 }
1475
TEST(OperationValidationTest, ABS_float16) {
1477 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT16);
1478 }
1479
TEST(OperationValidationTest, ABS_float32) {
1481 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_FLOAT32);
1482 }
1483
TEST(OperationValidationTest, ABS_int32) {
1485 activationOpTest(ANEURALNETWORKS_ABS, ANEURALNETWORKS_TENSOR_INT32);
1486 }
1487
TEST(OperationValidationTest, EXP_float16) {
1489 activationOpTest(ANEURALNETWORKS_EXP, ANEURALNETWORKS_TENSOR_FLOAT16);
1490 }
1491
TEST(OperationValidationTest, EXP_float32) {
1493 activationOpTest(ANEURALNETWORKS_EXP, ANEURALNETWORKS_TENSOR_FLOAT32);
1494 }
1495
TEST(OperationValidationTest, LOG_float16) {
1497 activationOpTest(ANEURALNETWORKS_LOG, ANEURALNETWORKS_TENSOR_FLOAT16);
1498 }
1499
TEST(OperationValidationTest, LOG_float32) {
1501 activationOpTest(ANEURALNETWORKS_LOG, ANEURALNETWORKS_TENSOR_FLOAT32);
1502 }
1503
TEST(OperationValidationTest, RSQRT_float16) {
1505 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_FLOAT16);
1506 }
1507
TEST(OperationValidationTest, RSQRT_float32) {
1509 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_FLOAT32);
1510 }
1511
TEST(OperationValidationTest, RSQRT_quant8) {
1513 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1514 }
1515
TEST(OperationValidationTest, RSQRT_quant8_signed) {
1517 activationOpTest(ANEURALNETWORKS_RSQRT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1518 }
1519
TEST(OperationValidationTest, SIN_float16) {
1521 activationOpTest(ANEURALNETWORKS_SIN, ANEURALNETWORKS_TENSOR_FLOAT16);
1522 }
1523
TEST(OperationValidationTest, SIN_float32) {
1525 activationOpTest(ANEURALNETWORKS_SIN, ANEURALNETWORKS_TENSOR_FLOAT32);
1526 }
1527
TEST(OperationValidationTest, SQRT_float16) {
1529 activationOpTest(ANEURALNETWORKS_SQRT, ANEURALNETWORKS_TENSOR_FLOAT16);
1530 }
1531
TEST(OperationValidationTest, SQRT_float32) {
1533 activationOpTest(ANEURALNETWORKS_SQRT, ANEURALNETWORKS_TENSOR_FLOAT32);
1534 }
1535
TEST(OperationValidationTest, NEG_float16) {
1537 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_FLOAT16);
1538 }
1539
TEST(OperationValidationTest, NEG_float32) {
1541 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_FLOAT32);
1542 }
1543
TEST(OperationValidationTest, NEG_int32) {
1545 activationOpTest(ANEURALNETWORKS_NEG, ANEURALNETWORKS_TENSOR_INT32);
1546 }
1547
TEST(OperationValidationTest, FLOOR_float16) {
1549 activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT16);
1550 }
1551
TEST(OperationValidationTest, FLOOR_float32) {
1553 activationOpTest(ANEURALNETWORKS_FLOOR, ANEURALNETWORKS_TENSOR_FLOAT32);
1554 }
1555
TEST(OperationValidationTest, LOGICAL_NOT_bool) {
1557 activationOpTest(ANEURALNETWORKS_LOGICAL_NOT, ANEURALNETWORKS_TENSOR_BOOL8);
1558 }
1559
TEST(OperationValidationTest, TANH_float16) {
1561 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_FLOAT16);
1562 }
1563
TEST(OperationValidationTest, TANH_float32) {
1565 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_FLOAT32);
1566 }
1567
TEST(OperationValidationTest, TANH_quant8) {
1569 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1570 }
1571
TEST(OperationValidationTest, TANH_quant8_signed) {
1573 activationOpTest(ANEURALNETWORKS_TANH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1574 }
1575
TEST(OperationValidationTest, RELU_float16) {
1577 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_FLOAT16);
1578 }
1579
TEST(OperationValidationTest, RELU1_float16) {
    activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_FLOAT16);
1582 }
1583
TEST(OperationValidationTest, RELU6_float16) {
    activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_FLOAT16);
1586 }
1587
TEST(OperationValidationTest, RELU_float32) {
1589 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_FLOAT32);
1590 }
1591
TEST(OperationValidationTest, RELU1_float32) {
    activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_FLOAT32);
1594 }
1595
TEST(OperationValidationTest, RELU6_float32) {
    activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_FLOAT32);
1598 }
1599
TEST(OperationValidationTest, RELU_quant8) {
1601 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1602 }
1603
TEST(OperationValidationTest, RELU1_quant8) {
    activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1606 }
1607
TEST(OperationValidationTest, RELU6_quant8) {
    activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1610 }
1611
TEST(OperationValidationTest, RELU_quant8_signed) {
1613 activationOpTest(ANEURALNETWORKS_RELU, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1614 }
1615
TEST(OperationValidationTest, RELU1_quant8_signed) {
    activationOpTest(ANEURALNETWORKS_RELU1, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1618 }
1619
TEST(OperationValidationTest, RELU6_quant8_signed) {
    activationOpTest(ANEURALNETWORKS_RELU6, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1622 }
1623
TEST(OperationValidationTest, LOGISTIC_float16) {
1625 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_FLOAT16);
1626 }
1627
TEST(OperationValidationTest, LOGISTIC_float32) {
1629 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_FLOAT32);
1630 }
1631
TEST(OperationValidationTest, LOGISTIC_quant8) {
1633 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1634 }
1635
TEST(OperationValidationTest, LOGISTIC_quant8_signed) {
1637 activationOpTest(ANEURALNETWORKS_LOGISTIC, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1638 }
1639
TEST(OperationValidationTest, HARD_SWISH_float16) {
1641 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_FLOAT16);
1642 }
1643
TEST(OperationValidationTest, HARD_SWISH_float32) {
1645 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_FLOAT32);
1646 }
1647
TEST(OperationValidationTest, HARD_SWISH_quant8) {
1649 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1650 }
1651
TEST(OperationValidationTest, HARD_SWISH_quant8_signed) {
1653 activationOpTest(ANEURALNETWORKS_HARD_SWISH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1654 }
1655
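// Builds ELU: a floating-point tensor input plus a scalar alpha whose type matches the
// input precision (FLOAT32 or FLOAT16).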
void eluOpTest(int32_t operandCode) {
1657 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1658 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1659 ANeuralNetworksOperandType alpha = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
1660 ? getOpType(ANEURALNETWORKS_FLOAT32)
1661 : getOpType(ANEURALNETWORKS_FLOAT16);
1662
1663 ANeuralNetworksOperandType output = input;
1664 OperationTestBase test(ANEURALNETWORKS_ELU, {input, alpha}, {output});
1665 test.testOpsValidations();
1666 }
1667
TEST(OperationValidationTest, ELU_float16) {
1669 eluOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1670 }
1671
TEST(OperationValidationTest, ELU_float32) {
1673 eluOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1674 }
1675
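// Builds RESHAPE: a tensor input (rank <= 4) and a 1-D TENSOR_INT32 shape operand.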
void reshapeOpTest(int32_t inputOperandCode) {
1677 SCOPED_TRACE(inputOperandCode);
1678 uint32_t inputDimensions[3] = {2, 3, 4};
1679 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
1680 uint32_t shapeDims[1] = {2};
1681 ANeuralNetworksOperandType shape = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, shapeDims);
1682 uint32_t outputDimensions[2] = {4, 6};
1683 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 2, outputDimensions);
1684 OperationTestBase test(ANEURALNETWORKS_RESHAPE, {input, shape}, {output},
1685 {{TensorRankConstraint::UpTo(4)}});
1686 test.testOpsValidations();
1687 }
1688
TEST(OperationValidationTest, RESHAPE) {
1690 reshapeOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1691 reshapeOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1692 reshapeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1693 reshapeOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1694 reshapeOpTest(ANEURALNETWORKS_TENSOR_INT32);
1695 }
1696
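// Builds LOG_SOFTMAX: a floating-point tensor input, a scalar beta matching the input
// precision, and a scalar INT32 axis.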
void logSoftmaxOpTest(int32_t inputOperandCode) {
1698 uint32_t inputDimensions[3] = {2, 2, 2};
1699 ANeuralNetworksOperandType input = {.type = inputOperandCode,
1700 .dimensionCount = 3,
1701 .dimensions = inputDimensions,
1702 .scale = 0.0f,
1703 .zeroPoint = 0};
1704 ANeuralNetworksOperandType beta = {.type = (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
1705 ? ANEURALNETWORKS_FLOAT32
1706 : ANEURALNETWORKS_FLOAT16,
1707 .dimensionCount = 0,
1708 .dimensions = nullptr,
1709 .scale = 0.0f,
1710 .zeroPoint = 0};
1711 ANeuralNetworksOperandType axis = {.type = ANEURALNETWORKS_INT32,
1712 .dimensionCount = 0,
1713 .dimensions = nullptr,
1714 .scale = 0.0f,
1715 .zeroPoint = 0};
1716
1717 ANeuralNetworksOperandType output = {.type = inputOperandCode,
1718 .dimensionCount = 3,
1719 .dimensions = inputDimensions,
1720 .scale = 0.0f,
1721 .zeroPoint = 0};
1722
1723 OperationTestBase test(ANEURALNETWORKS_LOG_SOFTMAX, {input, beta, axis}, {output});
1724 test.testOpsValidations();
1725 }
1726
TEST(OperationValidationTest, LOG_SOFTMAX_float16) {
1728 logSoftmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1729 }
1730
TEST(OperationValidationTest, LOG_SOFTMAX_float32) {
1732 logSoftmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1733 }
1734
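// Builds MEAN: a tensor input (rank <= 4), a 1-D TENSOR_INT32 axes operand, and a
// scalar INT32 keepDims flag.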
void meanOpTest(int32_t inputOperandCode) {
1736 uint32_t inputDimensions[3] = {2, 2, 2};
1737 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
1738 ANeuralNetworksOperandType dims = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, inputDimensions);
1739 ANeuralNetworksOperandType keepDims = getOpType(ANEURALNETWORKS_INT32);
1740 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 3, inputDimensions);
1741
1742 OperationTestBase test(ANEURALNETWORKS_MEAN, {input, dims, keepDims}, {output},
1743 {{TensorRankConstraint::UpTo(4)}});
1744 test.testOpsValidations();
1745 }
1746
TEST(OperationValidationTest, MEAN_float16) {
1748 meanOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1749 }
1750
TEST(OperationValidationTest, MEAN_float32) {
1752 meanOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1753 }
1754
TEST(OperationValidationTest, MEAN_quant8) {
1756 meanOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1757 }
1758
TEST(OperationValidationTest, MEAN_quant8_signed) {
1760 meanOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1761 }
1762
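// Shared helper for PAD and MIRROR_PAD: a tensor input plus a [rank, 2] TENSOR_INT32
// paddings operand; MIRROR_PAD additionally takes a scalar INT32 mode.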
void padOpTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandCode) {
1764 SCOPED_TRACE(inputOperandCode);
1765
1766 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1767 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
1768 uint32_t padSizeDimensions[2] = {4, 2};
1769 ANeuralNetworksOperandType padSize =
1770 getOpType(ANEURALNETWORKS_TENSOR_INT32, 2, padSizeDimensions);
1771 std::vector<ANeuralNetworksOperandType> inputs = {input, padSize};
1772 if (operationCode == ANEURALNETWORKS_MIRROR_PAD) {
1773 inputs.push_back(getOpType(ANEURALNETWORKS_INT32));
1774 }
1775
1776 uint32_t outputDimensions[4] = {4, 3, 4, 3};
1777 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outputDimensions);
1778
1779 std::vector<TensorRankMutator> inputRankMutators;
1780 if (operationCode == ANEURALNETWORKS_PAD) {
1781 inputRankMutators.push_back({TensorRankConstraint::UpTo(4)});
1782 }
1783
1784 OperationTestBase test(operationCode, inputs, {output}, inputRankMutators);
1785 test.testOpsValidations();
1786 }
1787
TEST(OperationValidationTest, PAD) {
1789 padOpTest(ANEURALNETWORKS_PAD, ANEURALNETWORKS_TENSOR_FLOAT16);
1790 padOpTest(ANEURALNETWORKS_PAD, ANEURALNETWORKS_TENSOR_FLOAT32);
1791 padOpTest(ANEURALNETWORKS_PAD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1792 padOpTest(ANEURALNETWORKS_PAD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1793 }
1794
TEST(OperationValidationTest, MIRROR_PAD) {
1796 padOpTest(ANEURALNETWORKS_MIRROR_PAD, ANEURALNETWORKS_TENSOR_FLOAT16);
1797 padOpTest(ANEURALNETWORKS_MIRROR_PAD, ANEURALNETWORKS_TENSOR_FLOAT32);
1798 padOpTest(ANEURALNETWORKS_MIRROR_PAD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1799 padOpTest(ANEURALNETWORKS_MIRROR_PAD, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1800 padOpTest(ANEURALNETWORKS_MIRROR_PAD, ANEURALNETWORKS_TENSOR_INT32);
1801 }
1802
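// Builds PAD_V2: like PAD but with an explicit pad value whose type follows the input
// (FLOAT16 or FLOAT32 for float inputs, INT32 for quantized inputs).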
void padV2OpTest(int32_t inputOperandCode) {
1804 SCOPED_TRACE(inputOperandCode);
1805 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1806 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
1807 uint32_t padSizeDimensions[1] = {4};
1808 ANeuralNetworksOperandType padSize =
1809 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, padSizeDimensions);
1810 ANeuralNetworksOperandType padValue = getOpType(ANEURALNETWORKS_FLOAT32);
1811 if (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
1812 padValue = getOpType(ANEURALNETWORKS_FLOAT16);
1813 } else if (inputOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
1814 inputOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
1815 padValue = getOpType(ANEURALNETWORKS_INT32);
1816 }
1817 uint32_t outputDimensions[4] = {4, 3, 4, 3};
1818 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outputDimensions);
1819 OperationTestBase test(ANEURALNETWORKS_PAD_V2, {input, padSize, padValue}, {output},
1820 {{TensorRankConstraint::UpTo(4)}});
1821 test.testOpsValidations();
1822 }
1823
TEST(OperationValidationTest, PAD_V2) {
1825 padV2OpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1826 padV2OpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1827 padV2OpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1828 padV2OpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1829 }
1830
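// Builds SOFTMAX both with and without the optional axis operand; beta is FLOAT16 for
// half-precision inputs and FLOAT32 otherwise.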
void softmaxOpTest(int32_t operandCode) {
1832 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1833 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1834
1835 ANeuralNetworksOperandType output = input;
1836 ANeuralNetworksOperandType beta = getOpType(ANEURALNETWORKS_FLOAT32);
1837 if (operandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
1838 beta = getOpType(ANEURALNETWORKS_FLOAT16);
1839 }
1840
1841 OperationTestBase softmaxTest(ANEURALNETWORKS_SOFTMAX, {input, beta}, {output},
1842 {{TensorRankConstraint::UpTo(4)}});
1843 softmaxTest.testOpsValidations();
1844
1845 ANeuralNetworksOperandType axis = getOpType(ANEURALNETWORKS_INT32);
1846 OperationTestBase softmaxAxisTest(ANEURALNETWORKS_SOFTMAX, {input, beta, axis}, {output},
1847 {{TensorRankConstraint::UpTo(4)}});
1848 softmaxAxisTest.testOpsValidations();
1849 }
1850
TEST(OperationValidationTest, SOFTMAX_float16) {
1852 softmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
1853 }
1854
TEST(OperationValidationTest, SOFTMAX_float32) {
1856 softmaxOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
1857 }
1858
TEST(OperationValidationTest, SOFTMAX_quant8) {
1860 softmaxOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1861 }
1862
TEST(OperationValidationTest, SOFTMAX_quant8_signed) {
1864 softmaxOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1865 }
1866
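// Shared helper for the 2-D pooling ops: covers the explicit- and implicit-padding
// signatures, each with and without the optional NCHW layout flag.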
void poolingOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1868 uint32_t inputDimensions[4] = {2, 4, 4, 2};
1869 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1870 ANeuralNetworksOperandType output = input;
1871
1872 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
1873 .dimensionCount = 0,
1874 .dimensions = nullptr,
1875 .scale = 0.0f,
1876 .zeroPoint = 0};
1877 ANeuralNetworksOperandType padLeft = scalar;
1878 ANeuralNetworksOperandType padRight = scalar;
1879 ANeuralNetworksOperandType padTop = scalar;
1880 ANeuralNetworksOperandType padBottom = scalar;
1881 ANeuralNetworksOperandType strideWidth = scalar;
1882 ANeuralNetworksOperandType strideHeight = scalar;
1883 ANeuralNetworksOperandType filterWidth = scalar;
1884 ANeuralNetworksOperandType filterHeight = scalar;
1885 ANeuralNetworksOperandType activation = scalar;
1886
1887 OperationTestBase explicitPoolingTest(operationCode,
1888 {input, padLeft, padRight, padTop, padBottom, strideWidth,
1889 strideHeight, filterWidth, filterHeight, activation},
1890 {output});
1891 explicitPoolingTest.testOpsValidations();
1892
1893 ANeuralNetworksOperandType padImplicit = scalar;
1894 OperationTestBase implicitPoolingTest(
1895 operationCode,
1896 {input, padImplicit, strideWidth, strideHeight, filterWidth, filterHeight, activation},
1897 {output});
1898 implicitPoolingTest.testOpsValidations();
1899
1900 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
1901 .dimensionCount = 0,
1902 .dimensions = nullptr,
1903 .scale = 0.0f,
1904 .zeroPoint = 0};
1905
1906 OperationTestBase explicitNchwPoolingTest(
1907 operationCode,
1908 {input, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight, filterWidth,
1909 filterHeight, activation, layout},
1910 {output});
1911 explicitNchwPoolingTest.testOpsValidations();
1912
1913 OperationTestBase implicitNchwPoolingTest(operationCode,
1914 {input, padImplicit, strideWidth, strideHeight,
1915 filterWidth, filterHeight, activation, layout},
1916 {output});
1917 implicitNchwPoolingTest.testOpsValidations();
1918 }
1919
TEST(OperationValidationTest, AVERAGE_POOL_2D_float16) {
1921 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1922 }
1923
TEST(OperationValidationTest, AVERAGE_POOL_2D_float32) {
1925 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1926 }
1927
TEST(OperationValidationTest, AVERAGE_POOL_2D_quant8) {
1929 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1930 }
1931
TEST(OperationValidationTest, AVERAGE_POOL_2D_quant8_signed) {
1933 poolingOpTest(ANEURALNETWORKS_AVERAGE_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1934 }
1935
TEST(OperationValidationTest, MAX_POOL_2D_float32) {
1937 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1938 }
1939
TEST(OperationValidationTest, MAX_POOL_2D_float16) {
1941 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1942 }
1943
TEST(OperationValidationTest, MAX_POOL_2D_quant8) {
1945 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
1946 }
1947
TEST(OperationValidationTest, MAX_POOL_2D_quant8_signed) {
1949 poolingOpTest(ANEURALNETWORKS_MAX_POOL_2D, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
1950 }
1951
TEST(OperationValidationTest, L2_POOL_2D_float16) {
1953 poolingOpTest(ANEURALNETWORKS_L2_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT16);
1954 }
1955
TEST(OperationValidationTest, L2_POOL_2D_float32) {
1957 poolingOpTest(ANEURALNETWORKS_L2_POOL_2D, ANEURALNETWORKS_TENSOR_FLOAT32);
1958 }
1959
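// Shared helper for SPACE_TO_DEPTH and DEPTH_TO_SPACE: a tensor input and a scalar
// INT32 block size, with and without the optional layout flag.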
void spaceDepthOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
1961 uint32_t inputDimensions[4] = {2, 2, 2, 2};
1962 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
1963
1964 ANeuralNetworksOperandType block_size = {.type = ANEURALNETWORKS_INT32,
1965 .dimensionCount = 0,
1966 .dimensions = nullptr,
1967 .scale = 0.0f,
1968 .zeroPoint = 0};
1969 ANeuralNetworksOperandType output = input;
1970
1971 OperationTestBase spaceDepthTest(operationCode, {input, block_size}, {output});
1972 spaceDepthTest.testOpsValidations();
1973
1974 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
1975 .dimensionCount = 0,
1976 .dimensions = nullptr,
1977 .scale = 0.0f,
1978 .zeroPoint = 0};
1979 OperationTestBase spaceDepthNchwTest(operationCode, {input, block_size, layout}, {output});
1980 spaceDepthNchwTest.testOpsValidations();
1981 }
1982
TEST(OperationValidationTest, SPACE_TO_DEPTH_float16) {
1984 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_FLOAT16);
1985 }
1986
TEST(OperationValidationTest, DEPTH_TO_SPACE_float16) {
1988 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_FLOAT16);
1989 }
1990
TEST(OperationValidationTest, SPACE_TO_DEPTH_float32) {
1992 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_FLOAT32);
1993 }
1994
TEST(OperationValidationTest, DEPTH_TO_SPACE_float32) {
1996 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_FLOAT32);
1997 }
1998
TEST(OperationValidationTest, SPACE_TO_DEPTH_quant8) {
2000 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2001 }
2002
TEST(OperationValidationTest, DEPTH_TO_SPACE_quant8) {
2004 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2005 }
2006
TEST(OperationValidationTest, SPACE_TO_DEPTH_quant8signed) {
2008 spaceDepthOpTest(ANEURALNETWORKS_SPACE_TO_DEPTH, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2009 }
2010
TEST(OperationValidationTest, DEPTH_TO_SPACE_quant8signed) {
2012 spaceDepthOpTest(ANEURALNETWORKS_DEPTH_TO_SPACE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2013 }
2014
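// Shared helper for SPACE_TO_BATCH_ND and BATCH_TO_SPACE_ND; only SPACE_TO_BATCH_ND
// takes a paddings operand, and both variants are checked with and without the layout flag.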
void spaceBatchOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
2016 uint32_t inputDimensions[4] = {2, 2, 2, 2};
2017 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
2018
2019 uint32_t blockDimensions[1] = {2};
2020 ANeuralNetworksOperandType blockShape = {.type = ANEURALNETWORKS_TENSOR_INT32,
2021 .dimensionCount = 1,
2022 .dimensions = blockDimensions,
2023 .scale = 0.0f,
2024 .zeroPoint = 0};
2025 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
2026 .dimensionCount = 0,
2027 .dimensions = nullptr,
2028 .scale = 0.0f,
2029 .zeroPoint = 0};
2030
2031 ANeuralNetworksOperandType padding = blockShape;
2032 ANeuralNetworksOperandType output = input;
2033 if (operationCode == ANEURALNETWORKS_SPACE_TO_BATCH_ND) {
2034 OperationTestBase spaceBatchTest(operationCode, {input, blockShape, padding}, {output});
2035 spaceBatchTest.testOpsValidations();
2036
2037 OperationTestBase spaceBatchNchwTest(operationCode, {input, blockShape, padding, layout},
2038 {output});
2039 spaceBatchNchwTest.testOpsValidations();
2040 } else {
2041 OperationTestBase spaceBatchTest(operationCode, {input, blockShape}, {output});
2042 spaceBatchTest.testOpsValidations();
2043
2044 OperationTestBase spaceBatchNchwTest(operationCode, {input, blockShape, layout}, {output});
2045 spaceBatchNchwTest.testOpsValidations();
2046 }
2047 }
2048
TEST(OperationValidationTest, SPACE_TO_BATCH_ND_float16) {
2050 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_FLOAT16);
2051 }
2052
TEST(OperationValidationTest, BATCH_TO_SPACE_ND_float16) {
2054 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_FLOAT16);
2055 }
2056
TEST(OperationValidationTest, SPACE_TO_BATCH_ND_float32) {
2058 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
2059 }
2060
TEST(OperationValidationTest, BATCH_TO_SPACE_ND_float32) {
2062 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_FLOAT32);
2063 }
2064
TEST(OperationValidationTest, SPACE_TO_BATCH_ND_quant8) {
2066 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2067 }
2068
TEST(OperationValidationTest, BATCH_TO_SPACE_ND_quant8) {
2070 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2071 }
2072
TEST(OperationValidationTest, SPACE_TO_BATCH_ND_quant8signed) {
2074 spaceBatchOpTest(ANEURALNETWORKS_SPACE_TO_BATCH_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2075 }
2076
TEST(OperationValidationTest, BATCH_TO_SPACE_ND_quant8signed) {
2078 spaceBatchOpTest(ANEURALNETWORKS_BATCH_TO_SPACE_ND, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2079 }
2080
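// Shared helper for TRANSPOSE and SQUEEZE: a tensor input (rank <= 4) plus a 1-D
// TENSOR_INT32 permutation/axes operand.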
void transposeAndSqueezeOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
2082 uint32_t inputDimensions[4] = {2, 2, 2, 2};
2083 ANeuralNetworksOperandType input = getOpType(operandCode, 4, inputDimensions);
2084
2085 uint32_t blockDimensions[1] = {4};
2086 ANeuralNetworksOperandType dims = {.type = ANEURALNETWORKS_TENSOR_INT32,
2087 .dimensionCount = 1,
2088 .dimensions = blockDimensions,
2089 .scale = 0.0f,
2090 .zeroPoint = 0};
2091
2092 ANeuralNetworksOperandType output = input;
2093 OperationTestBase transposeAndSqueezeTest(operationCode, {input, dims}, {output},
2094 {{TensorRankConstraint::UpTo(4)}});
2095 transposeAndSqueezeTest.testOpsValidations();
2096 }
2097
TEST(OperationValidationTest, TRANSPOSE_float16) {
2099 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_FLOAT16);
2100 }
2101
TEST(OperationValidationTest, SQUEEZE_float16) {
2103 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_FLOAT16);
2104 }
2105
TEST(OperationValidationTest, TRANSPOSE_float32) {
2107 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_FLOAT32);
2108 }
2109
TEST(OperationValidationTest, SQUEEZE_float32) {
2111 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_FLOAT32);
2112 }
2113
TEST(OperationValidationTest, TRANSPOSE_quant8) {
2115 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2116 }
2117
TEST(OperationValidationTest, TRANSPOSE_quant8signed) {
2119 transposeAndSqueezeOpTest(ANEURALNETWORKS_TRANSPOSE,
2120 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2121 }
2122
TEST(OperationValidationTest, SQUEEZE_quant8) {
2124 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2125 }
2126
TEST(OperationValidationTest, SQUEEZE_quant8_signed) {
2128 transposeAndSqueezeOpTest(ANEURALNETWORKS_SQUEEZE, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2129 }
2130
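// Builds CONV_2D in all of its signatures: explicit and implicit padding, the optional
// NCHW layout flag, and the optional dilation factors. Quantized filters use a
// TENSOR_INT32 bias, and per-channel quantized filters also attach
// ANeuralNetworksSymmPerChannelQuantParams to the filter operand.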
void convOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
2132 uint32_t inputDimensions[4] = {2, 4, 4, 2};
2133 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2134 ANeuralNetworksOperandType output = input;
2135
2136 float filterScales[2] = {0.5f, 1.0f};
2137 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, inputDimensions);
2138 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
2139 .channelDim = 0,
2140 .scaleCount = 2,
2141 .scales = filterScales,
2142 };
2143
2144 uint32_t biasDimensions[1] = {2};
2145 ANeuralNetworksOperandType bias = {.type = inputOperandCode,
2146 .dimensionCount = 1,
2147 .dimensions = biasDimensions,
2148 .scale = 0.0f,
2149 .zeroPoint = 0};
2150 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM) {
2151 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2152 bias.scale = 0.25f;
2153 }
2154 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2155 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2156 bias.scale = 0.25f;
2157 }
2158 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2159 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2160 bias.scale = 0.0f;
2161 }
2162
2163 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
2164 .dimensionCount = 0,
2165 .dimensions = nullptr,
2166 .scale = 0.0f,
2167 .zeroPoint = 0};
2168 ANeuralNetworksOperandType padLeft = scalar;
2169 ANeuralNetworksOperandType padRight = scalar;
2170 ANeuralNetworksOperandType padTop = scalar;
2171 ANeuralNetworksOperandType padBottom = scalar;
2172 ANeuralNetworksOperandType strideWidth = scalar;
2173 ANeuralNetworksOperandType strideHeight = scalar;
2174 ANeuralNetworksOperandType dilationHeightFactor = scalar;
2175 ANeuralNetworksOperandType dilationWidthFactor = scalar;
2176 ANeuralNetworksOperandType activation = scalar;
2177
2178 OperationTestBase explicitConvTest(ANEURALNETWORKS_CONV_2D,
2179 {input, filter, bias, padLeft, padRight, padTop, padBottom,
2180 strideWidth, strideHeight, activation},
2181 {output});
2182 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2183 explicitConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2184 }
2185 explicitConvTest.testOpsValidations();
2186
2187 ANeuralNetworksOperandType padImplicit = scalar;
2188 OperationTestBase implicitConvTest(
2189 ANEURALNETWORKS_CONV_2D,
2190 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation}, {output},
2191 {{TensorRankConstraint::Exactly(4), {0, 1}}});
2192 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2193 implicitConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2194 }
2195 implicitConvTest.testOpsValidations();
2196
2197 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
2198 .dimensionCount = 0,
2199 .dimensions = nullptr,
2200 .scale = 0.0f,
2201 .zeroPoint = 0};
2202
2203 OperationTestBase explicitNchwConvTest(
2204 ANEURALNETWORKS_CONV_2D,
2205 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2206 activation, layout},
2207 {output});
2208 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2209 explicitNchwConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2210 }
2211 explicitNchwConvTest.testOpsValidations();
2212
2213 OperationTestBase implicitNchwConvTest(
2214 ANEURALNETWORKS_CONV_2D,
2215 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation, layout},
2216 {output});
2217 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2218 implicitNchwConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2219 }
2220 implicitNchwConvTest.testOpsValidations();
2221
2222 OperationTestBase explicitDilateConvTest(
2223 ANEURALNETWORKS_CONV_2D,
2224 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2225 activation, layout, dilationWidthFactor, dilationHeightFactor},
2226 {output});
2227 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2228 explicitDilateConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2229 }
2230 explicitDilateConvTest.testOpsValidations();
2231
2232 OperationTestBase implicitDilateConvTest(
2233 ANEURALNETWORKS_CONV_2D,
2234 {input, filter, bias, padImplicit, strideWidth, strideHeight, activation, layout,
2235 dilationWidthFactor, dilationHeightFactor},
2236 {output});
2237 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2238 implicitDilateConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2239 }
2240 implicitDilateConvTest.testOpsValidations();
2241 }
2242
TEST(OperationValidationTest, CONV_2D_float16) {
2244 convOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2245 }
2246
TEST(OperationValidationTest, CONV_2D_float32) {
2248 convOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2249 }
2250
TEST(OperationValidationTest, CONV_2D_quant8) {
2252 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2253 }
2254
TEST(OperationValidationTest, CONV_2D_quant8_per_channel) {
2256 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2257 }
2258
TEST(OperationValidationTest, CONV_2D_quant8_signed) {
2260 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2261 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2262 }
2263
TEST(OperationValidationTest, CONV_2D_quant8_signed_per_channel) {
2265 convOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2266 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2267 }
2268
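// Builds DEPTHWISE_CONV_2D in all of its signatures: explicit and implicit padding, the
// optional layout flag, and the optional dilation factors, including the per-channel
// quantized filter case.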
void depthwiseConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
2270 uint32_t inputDimensions[4] = {1, 2, 2, 2};
2271 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2272 ANeuralNetworksOperandType output = input;
2273
2274 float filterScales[2] = {0.5f, 1.0f};
2275 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, inputDimensions);
2276 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
2277 .channelDim = 3,
2278 .scaleCount = 2,
2279 .scales = filterScales,
2280 };
2281
2282 uint32_t biasDimensions[1] = {2};
2283 ANeuralNetworksOperandType bias = {.type = inputOperandCode,
2284 .dimensionCount = 1,
2285 .dimensions = biasDimensions,
2286 .scale = 0.0f,
2287 .zeroPoint = 0};
2288 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
2289 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2290 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2291 bias.scale = 0.25f;
2292 }
2293 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2294 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2295 bias.scale = 0.0f;
2296 }
2297
2298 ANeuralNetworksOperandType scalar = {.type = ANEURALNETWORKS_INT32,
2299 .dimensionCount = 0,
2300 .dimensions = nullptr,
2301 .scale = 0.0f,
2302 .zeroPoint = 0};
2303 ANeuralNetworksOperandType padLeft = scalar;
2304 ANeuralNetworksOperandType padRight = scalar;
2305 ANeuralNetworksOperandType padTop = scalar;
2306 ANeuralNetworksOperandType padBottom = scalar;
2307 ANeuralNetworksOperandType strideWidth = scalar;
2308 ANeuralNetworksOperandType strideHeight = scalar;
2309 ANeuralNetworksOperandType multiplier = scalar;
2310 ANeuralNetworksOperandType activation = scalar;
2311
2312 OperationTestBase explicitDepthwiseConvTest(
2313 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2314 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2315 multiplier, activation},
2316 {output});
2317 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2318 explicitDepthwiseConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2319 }
2320 explicitDepthwiseConvTest.testOpsValidations();
2321
2322 ANeuralNetworksOperandType padImplicit = scalar;
2323 OperationTestBase implicitDepthwiseConvTest(
2324 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2325 {input, filter, bias, padImplicit, strideWidth, strideHeight, multiplier, activation},
2326 {output});
2327 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2328 implicitDepthwiseConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
2329 }
2330 implicitDepthwiseConvTest.testOpsValidations();
2331
2332 ANeuralNetworksOperandType layout = {.type = ANEURALNETWORKS_BOOL,
2333 .dimensionCount = 0,
2334 .dimensions = nullptr,
2335 .scale = 0.0f,
2336 .zeroPoint = 0};
2337
2338 OperationTestBase explicitNchwDepthwiseConvTest(
2339 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2340 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2341 multiplier, activation, layout},
2342 {output});
2343 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2344 explicitNchwDepthwiseConvTest.setInputSymmPerChannelQuantParams(1,
2345 filterChannelQuantParams);
2346 }
2347 explicitNchwDepthwiseConvTest.testOpsValidations();
2348
2349 OperationTestBase implicitNchwDepthwiseConvTest(ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2350 {input, filter, bias, padImplicit, strideWidth,
2351 strideHeight, multiplier, activation, layout},
2352 {output});
2353 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2354 implicitNchwDepthwiseConvTest.setInputSymmPerChannelQuantParams(1,
2355 filterChannelQuantParams);
2356 }
2357 implicitNchwDepthwiseConvTest.testOpsValidations();
2358
2359 ANeuralNetworksOperandType dilationHeightFactor = scalar;
2360 ANeuralNetworksOperandType dilationWidthFactor = scalar;
2361
2362 OperationTestBase explicitDilationDepthwiseConvTest(
2363 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2364 {input, filter, bias, padLeft, padRight, padTop, padBottom, strideWidth, strideHeight,
2365 multiplier, activation, layout, dilationWidthFactor, dilationHeightFactor},
2366 {output});
2367 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2368 explicitDilationDepthwiseConvTest.setInputSymmPerChannelQuantParams(
2369 1, filterChannelQuantParams);
2370 }
2371 explicitDilationDepthwiseConvTest.testOpsValidations();
2372
2373 OperationTestBase implicitDilationDepthwiseConvTest(
2374 ANEURALNETWORKS_DEPTHWISE_CONV_2D,
2375 {input, filter, bias, padImplicit, strideWidth, strideHeight, multiplier, activation,
2376 layout, dilationWidthFactor, dilationHeightFactor},
2377 {output});
2378 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
2379 implicitDilationDepthwiseConvTest.setInputSymmPerChannelQuantParams(
2380 1, filterChannelQuantParams);
2381 }
2382 implicitDilationDepthwiseConvTest.testOpsValidations();
2383 }
2384
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float32) {
2386 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2387 }
2388
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_float16) {
2390 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2391 }
2392
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8) {
2394 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2395 }
2396
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_per_channel) {
2398 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
2399 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2400 }
2401
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_signed) {
2403 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2404 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2405 }
2406
TEST(OperationValidationTest, DEPTHWISE_CONV_2D_quant8_signed_per_channel) {
2408 depthwiseConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
2409 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
2410 }
2411
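// Builds FULLY_CONNECTED: input rank between 2 and 4, 2-D weights, 1-D bias
// (TENSOR_INT32 for quantized inputs), and a fused activation scalar.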
void fullyConnectedOpTest(int32_t operandCode) {
2413 uint32_t inputDimensions[2] = {5, 5};
2414 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2415
2416 ANeuralNetworksOperandType weights = input;
2417 ANeuralNetworksOperandType output = input;
2418
2419 uint32_t biasDimensions[1] = {5};
2420 ANeuralNetworksOperandType bias = {.type = operandCode,
2421 .dimensionCount = 1,
2422 .dimensions = biasDimensions,
2423 .scale = 0.0f,
2424 .zeroPoint = 0};
2425 if (operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
2426 operandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
2427 bias.type = ANEURALNETWORKS_TENSOR_INT32;
2428 bias.scale = 0.25f;
2429 }
2430
2431 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
2432 .dimensionCount = 0,
2433 .dimensions = nullptr,
2434 .scale = 0.0f,
2435 .zeroPoint = 0};
2436
2437 OperationTestBase fullyConnectedTest(ANEURALNETWORKS_FULLY_CONNECTED,
2438 {input, weights, bias, activation}, {output},
2439 {{TensorRankConstraint::Between(2, 4), {0}},
2440 {TensorRankConstraint::Exactly(2), {1}},
2441 {TensorRankConstraint::Exactly(1), {2}}});
2442 fullyConnectedTest.testOpsValidations();
2443 }
2444
TEST(OperationValidationTest, FULLY_CONNECTED_float16) {
2446 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
2447 }
2448
TEST(OperationValidationTest, FULLY_CONNECTED_float32) {
2450 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2451 }
2452
TEST(OperationValidationTest, FULLY_CONNECTED_quant8) {
2454 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2455 }
2456
TEST(OperationValidationTest, FULLY_CONNECTED_quant8_signed) {
2458 fullyConnectedOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2459 }
2460
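// Builds CONCATENATION with one and with two tensor inputs (rank <= 4); the trailing
// scalar INT32 operand is the concatenation axis, despite being named "activation" here.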
void concatenationTest(int32_t operandCode) {
2462 uint32_t inputDimensions[2] = {5, 5};
2463 ANeuralNetworksOperandType input1 = getOpType(operandCode, 2, inputDimensions);
2464 ANeuralNetworksOperandType input2 = input1;
2465 ANeuralNetworksOperandType output = input1;
2466
2467 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
2468 .dimensionCount = 0,
2469 .dimensions = nullptr,
2470 .scale = 0.0f,
2471 .zeroPoint = 0};
2472
2473 OperationTestBase concat2Test(ANEURALNETWORKS_CONCATENATION, {input1, input2, activation},
2474 {output}, {{TensorRankConstraint::UpTo(4), {0, 1}}});
2475 concat2Test.testOpsValidations();
2476
2477 OperationTestBase concat1Test(ANEURALNETWORKS_CONCATENATION, {input1, activation}, {output},
2478 {{TensorRankConstraint::UpTo(4)}});
2479 concat1Test.testOpsValidations();
2480 }
2481
TEST(OperationValidationTest, CONCATENATION_float16) {
2483 concatenationTest(ANEURALNETWORKS_TENSOR_FLOAT16);
2484 }
2485
TEST(OperationValidationTest, CONCATENATION_float32) {
2487 concatenationTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2488 }
2489
TEST(OperationValidationTest, CONCATENATION_quant8) {
2491 concatenationTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2492 }
2493
TEST(OperationValidationTest, CONCATENATION_quant8_signed) {
2495 concatenationTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2496 }
2497
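// Builds RESIZE_BILINEAR with the requested scalar type for the output height/width
// (INT32 or floating point), with and without the optional layout flag.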
void resizeBilinearOpTest(int32_t inputOperandCode, int32_t scalarOperandCode) {
2499 SCOPED_TRACE(inputOperandCode);
2500 uint32_t inputDimensions[4] = {2, 2, 2, 2};
2501 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inputDimensions);
2502 ANeuralNetworksOperandType height = getOpType(scalarOperandCode);
2503 ANeuralNetworksOperandType width = height;
2504 ANeuralNetworksOperandType output = input;
2505
2506 OperationTestBase resizeTest(ANEURALNETWORKS_RESIZE_BILINEAR, {input, height, width}, {output});
2507 resizeTest.testOpsValidations();
2508
2509 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
2510 OperationTestBase resizeNchwTest(ANEURALNETWORKS_RESIZE_BILINEAR,
2511 {input, height, width, layout}, {output});
2512 resizeNchwTest.testOpsValidations();
2513 }
2514
TEST(OperationValidationTest, RESIZE_BILINEAR) {
2516 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_INT32);
2517 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_INT32);
2518 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_INT32);
2519 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_INT32);
2520 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_FLOAT16);
2521 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_FLOAT32);
2522 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_FLOAT32);
2523 resizeBilinearOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_FLOAT32);
2524 }
2525
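// Builds EMBEDDING_LOOKUP: a 1-D TENSOR_INT32 lookup vector indexing rows of a 2-D
// values tensor.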
void embeddingLookupTest(int32_t operandCode) {
2527 uint32_t lookupDimensions[1] = {5};
2528 ANeuralNetworksOperandType lookup = {.type = ANEURALNETWORKS_TENSOR_INT32,
2529 .dimensionCount = 1,
2530 .dimensions = lookupDimensions,
2531 .scale = 0.0f,
2532 .zeroPoint = 0};
2533
2534 uint32_t inputDimensions[2] = {5, 5};
2535 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2536 ANeuralNetworksOperandType output = input;
2537
2538 OperationTestBase embedLookupTest(ANEURALNETWORKS_EMBEDDING_LOOKUP, {lookup, input}, {output});
2539 embedLookupTest.testOpsValidations();
2540 }
2541
2542 TEST(OperationValidationTest, EMBEDDING_LOOKUP_float32) {
2543 embeddingLookupTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2544 }
2545
2546 TEST(OperationValidationTest, EMBEDDING_LOOKUP_int32) {
2547 embeddingLookupTest(ANEURALNETWORKS_TENSOR_INT32);
2548 }
2549
2550 TEST(OperationValidationTest, EMBEDDING_LOOKUP_quant8) {
2551 embeddingLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2552 }
2553
2554 TEST(OperationValidationTest, EMBEDDING_LOOKUP_quant8_signed) {
2555 embeddingLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
2556 }
2557
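// HASHTABLE_LOOKUP takes lookup keys, a key tensor, and a value tensor; it produces
// the looked-up values plus a TENSOR_QUANT8_ASYMM "hits" tensor.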
2558 void hashtableLookupTest(int32_t operandCode) {
2559 uint32_t lookupDimensions[1] = {5};
2560 ANeuralNetworksOperandType lookup = {.type = ANEURALNETWORKS_TENSOR_INT32,
2561 .dimensionCount = 1,
2562 .dimensions = lookupDimensions,
2563 .scale = 0.0f,
2564 .zeroPoint = 0};
2565 ANeuralNetworksOperandType keys = lookup;
2566
2567 uint32_t valuesDimensions[2] = {5, 5};
2568 ANeuralNetworksOperandType values = getOpType(operandCode, 2, valuesDimensions);
2569 ANeuralNetworksOperandType output = values;
2570
2571 ANeuralNetworksOperandType hits = lookup;
2572 hits.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
2573 hits.scale = 1.0f;
2574
2575 OperationTestBase hashLookupTest(ANEURALNETWORKS_HASHTABLE_LOOKUP, {lookup, keys, values},
2576 {output, hits});
2577 hashLookupTest.testOpsValidations();
2578 }
2579
2580 TEST(OperationValidationTest, HASHTABLE_LOOKUP_float32) {
2581 hashtableLookupTest(ANEURALNETWORKS_TENSOR_FLOAT32);
2582 }
2583
2584 TEST(OperationValidationTest, HASHTABLE_LOOKUP_int32) {
2585 hashtableLookupTest(ANEURALNETWORKS_TENSOR_INT32);
2586 }
2587
2588 TEST(OperationValidationTest, HASHTABLE_LOOKUP_quant8) {
2589 hashtableLookupTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
2590 }
2591
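// LSH_PROJECTION: the hash and weight tensors share one float type while the input
// type varies per test; the output is always TENSOR_INT32.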
2592 void lshProjectionTest(int32_t operandCode, int32_t hashAndWeightOperandCode) {
2593 uint32_t inputDimensions[2] = {5, 5};
2594 ANeuralNetworksOperandType hash = getOpType(hashAndWeightOperandCode, 2, inputDimensions);
2595 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
2596
2597 uint32_t weightDimensions[1] = {5};
2598 ANeuralNetworksOperandType weight = getOpType(hashAndWeightOperandCode, 1, weightDimensions);
2599
2600 ANeuralNetworksOperandType type = {.type = ANEURALNETWORKS_INT32,
2601 .dimensionCount = 0,
2602 .dimensions = nullptr,
2603 .scale = 0.0f,
2604 .zeroPoint = 0};
2605
2606 ANeuralNetworksOperandType output = weight;
2607 output.type = ANEURALNETWORKS_TENSOR_INT32;
2608
2609 OperationTestBase lshProjTest(ANEURALNETWORKS_LSH_PROJECTION, {hash, input, weight, type},
2610 {output});
2611 lshProjTest.testOpsValidations();
2612 }
2613
2614 TEST(OperationValidationTest, LSH_PROJECTION_float16) {
2615 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT32);
2616 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
2617 }
2618
2619 TEST(OperationValidationTest, LSH_PROJECTION_float32) {
2620 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2621 lshProjectionTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT16);
2622 }
2623
2624 TEST(OperationValidationTest, LSH_PROJECTION_quant8) {
2625 lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT32);
2626 lshProjectionTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_FLOAT16);
2627 }
2628
2629 TEST(OperationValidationTest, LSH_PROJECTION_int32) {
2630 lshProjectionTest(ANEURALNETWORKS_TENSOR_INT32, ANEURALNETWORKS_TENSOR_FLOAT32);
2631 lshProjectionTest(ANEURALNETWORKS_TENSOR_INT32, ANEURALNETWORKS_TENSOR_FLOAT16);
2632 }
2633
2634 TEST(OperationValidationTest, LSTM_float32) {
2635 uint32_t oneDimensional[1] = {5};
2636 uint32_t twoDimensional[2] = {5, 5};
2637 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
2638 .dimensionCount = 1,
2639 .dimensions = oneDimensional,
2640 .scale = 0.0f,
2641 .zeroPoint = 0};
2642 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
2643 .dimensionCount = 2,
2644 .dimensions = twoDimensional,
2645 .scale = 0.0f,
2646 .zeroPoint = 0};
2647 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
2648 .dimensionCount = 0,
2649 .dimensions = nullptr,
2650 .scale = 0.0f,
2651 .zeroPoint = 0};
2652 ANeuralNetworksOperandType floatScalar = {.type = ANEURALNETWORKS_FLOAT32,
2653 .dimensionCount = 0,
2654 .dimensions = nullptr,
2655 .scale = 0.0f,
2656 .zeroPoint = 0};
2657
2658 ANeuralNetworksOperandType input = floatTensor2D;
2659 ANeuralNetworksOperandType inputToInput = floatTensor2D;
2660 ANeuralNetworksOperandType inputToForget = floatTensor2D;
2661 ANeuralNetworksOperandType inputToCell = floatTensor2D;
2662 ANeuralNetworksOperandType inputToOutput = floatTensor2D;
2663 ANeuralNetworksOperandType recurrentToInput = floatTensor2D;
2664 ANeuralNetworksOperandType recurrentToForget = floatTensor2D;
2665 ANeuralNetworksOperandType recurrentToCell = floatTensor2D;
2666 ANeuralNetworksOperandType recurrentToOutput = floatTensor2D;
2667 ANeuralNetworksOperandType cellToInput = floatTensor1D;
2668 ANeuralNetworksOperandType cellToForget = floatTensor1D;
2669 ANeuralNetworksOperandType cellToOutput = floatTensor1D;
2670 ANeuralNetworksOperandType inputGateBias = floatTensor1D;
2671 ANeuralNetworksOperandType forgetGateBias = floatTensor1D;
2672 ANeuralNetworksOperandType cellBias = floatTensor1D;
2673 ANeuralNetworksOperandType outputGateBias = floatTensor1D;
2674 ANeuralNetworksOperandType projWeights = floatTensor2D;
2675 ANeuralNetworksOperandType projBias = floatTensor1D;
2676 ANeuralNetworksOperandType outputStateIn = floatTensor2D;
2677 ANeuralNetworksOperandType cellStateIn = floatTensor2D;
2678 ANeuralNetworksOperandType activation = intScalar;
2679 ANeuralNetworksOperandType clipCellState = floatScalar;
2680 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2681
2682 ANeuralNetworksOperandType scratch = floatTensor2D;
2683 ANeuralNetworksOperandType outputStateOut = floatTensor2D;
2684 ANeuralNetworksOperandType cellStateOut = floatTensor2D;
2685 ANeuralNetworksOperandType output = floatTensor2D;
2686
2687 OperationTestBase lstmTest(ANEURALNETWORKS_LSTM,
2688 {input,
2689 inputToInput,
2690 inputToForget,
2691 inputToCell,
2692 inputToOutput,
2693 recurrentToInput,
2694 recurrentToForget,
2695 recurrentToCell,
2696 recurrentToOutput,
2697 cellToInput,
2698 cellToForget,
2699 cellToOutput,
2700 inputGateBias,
2701 forgetGateBias,
2702 cellBias,
2703 outputGateBias,
2704 projWeights,
2705 projBias,
2706 outputStateIn,
2707 cellStateIn,
2708 activation,
2709 clipCellState,
2710 clipProjLayer},
2711 {scratch, outputStateOut, cellStateOut, output});
2712 lstmTest.testOpsValidations();
2713 }
2714
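// Same LSTM wiring as the float32 test above, extended with the four optional
// layer-normalization weight inputs accepted by the newer signature.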
2715 void lstmTestV1_2(int32_t operandCode) {
2716 SCOPED_TRACE(operandCode);
2717 uint32_t oneDimensional[1] = {5};
2718 uint32_t twoDimensional[2] = {5, 5};
2719 ANeuralNetworksOperandType floatTensor1D = {.type = operandCode,
2720 .dimensionCount = 1,
2721 .dimensions = oneDimensional,
2722 .scale = 0.0f,
2723 .zeroPoint = 0};
2724 ANeuralNetworksOperandType floatTensor2D = {.type = operandCode,
2725 .dimensionCount = 2,
2726 .dimensions = twoDimensional,
2727 .scale = 0.0f,
2728 .zeroPoint = 0};
2729 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
2730 .dimensionCount = 0,
2731 .dimensions = nullptr,
2732 .scale = 0.0f,
2733 .zeroPoint = 0};
2734 ANeuralNetworksOperandType floatScalar = {
2735 .type = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32) ? ANEURALNETWORKS_FLOAT32
2736 : ANEURALNETWORKS_FLOAT16,
2737 .dimensionCount = 0,
2738 .dimensions = nullptr,
2739 .scale = 0.0f,
2740 .zeroPoint = 0};
2741
2742 ANeuralNetworksOperandType input = floatTensor2D;
2743 ANeuralNetworksOperandType inputToInput = floatTensor2D;
2744 ANeuralNetworksOperandType inputToForget = floatTensor2D;
2745 ANeuralNetworksOperandType inputToCell = floatTensor2D;
2746 ANeuralNetworksOperandType inputToOutput = floatTensor2D;
2747 ANeuralNetworksOperandType recurrentToInput = floatTensor2D;
2748 ANeuralNetworksOperandType recurrentToForget = floatTensor2D;
2749 ANeuralNetworksOperandType recurrentToCell = floatTensor2D;
2750 ANeuralNetworksOperandType recurrentToOutput = floatTensor2D;
2751 ANeuralNetworksOperandType cellToInput = floatTensor1D;
2752 ANeuralNetworksOperandType cellToForget = floatTensor1D;
2753 ANeuralNetworksOperandType cellToOutput = floatTensor1D;
2754 ANeuralNetworksOperandType inputGateBias = floatTensor1D;
2755 ANeuralNetworksOperandType forgetGateBias = floatTensor1D;
2756 ANeuralNetworksOperandType cellBias = floatTensor1D;
2757 ANeuralNetworksOperandType outputGateBias = floatTensor1D;
2758 ANeuralNetworksOperandType projWeights = floatTensor2D;
2759 ANeuralNetworksOperandType projBias = floatTensor1D;
2760 ANeuralNetworksOperandType outputStateIn = floatTensor2D;
2761 ANeuralNetworksOperandType cellStateIn = floatTensor2D;
2762 ANeuralNetworksOperandType activation = intScalar;
2763 ANeuralNetworksOperandType clipCellState = floatScalar;
2764 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2765 ANeuralNetworksOperandType inputLayerNormWeights = floatTensor1D;
2766 ANeuralNetworksOperandType forgetLayerNormWeights = floatTensor1D;
2767 ANeuralNetworksOperandType cellLayerNormWeights = floatTensor1D;
2768 ANeuralNetworksOperandType outputLayerNormWeights = floatTensor1D;
2769
2770 ANeuralNetworksOperandType scratch = floatTensor2D;
2771 ANeuralNetworksOperandType outputStateOut = floatTensor2D;
2772 ANeuralNetworksOperandType cellStateOut = floatTensor2D;
2773 ANeuralNetworksOperandType output = floatTensor2D;
2774
2775 OperationTestBase lstmTest(ANEURALNETWORKS_LSTM,
2776 {input,
2777 inputToInput,
2778 inputToForget,
2779 inputToCell,
2780 inputToOutput,
2781 recurrentToInput,
2782 recurrentToForget,
2783 recurrentToCell,
2784 recurrentToOutput,
2785 cellToInput,
2786 cellToForget,
2787 cellToOutput,
2788 inputGateBias,
2789 forgetGateBias,
2790 cellBias,
2791 outputGateBias,
2792 projWeights,
2793 projBias,
2794 outputStateIn,
2795 cellStateIn,
2796 activation,
2797 clipCellState,
2798 clipProjLayer,
2799 inputLayerNormWeights,
2800 forgetLayerNormWeights,
2801 cellLayerNormWeights,
2802 outputLayerNormWeights},
2803 {scratch, outputStateOut, cellStateOut, output});
2804 lstmTest.testOpsValidations();
2805 }
2806
2807 TEST(OperationValidationTest, LSTM_V1_2) {
2808 lstmTestV1_2(ANEURALNETWORKS_TENSOR_FLOAT32);
2809 lstmTestV1_2(ANEURALNETWORKS_TENSOR_FLOAT16);
2810 }
2811
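// BIDIRECTIONAL_SEQUENCE_LSTM: full forward and backward parameter sets, auxiliary
// inputs, scalar controls (activation, clipping, mergeOutputs, timeMajor), and
// per-direction layer-normalization weights; one output tensor per direction.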
2812 void lstmBidirectionalSequence(int32_t operandCode) {
2813 uint32_t oneDimensional[1] = {5};
2814 uint32_t twoDimensional[2] = {5, 5};
2815 uint32_t threeDimensional[3] = {5, 5, 5};
2816 ANeuralNetworksOperandType floatTensor1D = {
2817 .type = operandCode,
2818 .dimensionCount = 1,
2819 .dimensions = oneDimensional,
2820 .scale = 0.0f,
2821 .zeroPoint = 0,
2822 };
2823 ANeuralNetworksOperandType floatTensor2D = {
2824 .type = operandCode,
2825 .dimensionCount = 2,
2826 .dimensions = twoDimensional,
2827 .scale = 0.0f,
2828 .zeroPoint = 0,
2829 };
2830 ANeuralNetworksOperandType floatTensor3D = {
2831 .type = operandCode,
2832 .dimensionCount = 3,
2833 .dimensions = threeDimensional,
2834 .scale = 0.0f,
2835 .zeroPoint = 0,
2836 };
2837 ANeuralNetworksOperandType intScalar = {
2838 .type = ANEURALNETWORKS_INT32,
2839 .dimensionCount = 0,
2840 .dimensions = nullptr,
2841 .scale = 0.0f,
2842 .zeroPoint = 0,
2843 };
2844 ANeuralNetworksOperandType floatScalar = {
2845 .type = operandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ? ANEURALNETWORKS_FLOAT32
2846 : ANEURALNETWORKS_FLOAT16,
2847 .dimensionCount = 0,
2848 .dimensions = nullptr,
2849 .scale = 0.0f,
2850 .zeroPoint = 0,
2851 };
2852 ANeuralNetworksOperandType boolScalar = {.type = ANEURALNETWORKS_BOOL,
2853 .dimensionCount = 0,
2854 .dimensions = nullptr,
2855 .scale = 0.0f,
2856 .zeroPoint = 0};
2857
2858 ANeuralNetworksOperandType input = floatTensor3D;
2859 ANeuralNetworksOperandType inputToInputFw = floatTensor2D;
2860 ANeuralNetworksOperandType inputToForgetFw = floatTensor2D;
2861 ANeuralNetworksOperandType inputToCellFw = floatTensor2D;
2862 ANeuralNetworksOperandType inputToOutputFw = floatTensor2D;
2863 ANeuralNetworksOperandType recurrentToInputFw = floatTensor2D;
2864 ANeuralNetworksOperandType recurrentToForgetFw = floatTensor2D;
2865 ANeuralNetworksOperandType recurrentToCellFw = floatTensor2D;
2866 ANeuralNetworksOperandType recurrentToOutputFw = floatTensor2D;
2867 ANeuralNetworksOperandType cellToInputFw = floatTensor1D;
2868 ANeuralNetworksOperandType cellToForgetFw = floatTensor1D;
2869 ANeuralNetworksOperandType cellToOutputFw = floatTensor1D;
2870 ANeuralNetworksOperandType inputGateBiasFw = floatTensor1D;
2871 ANeuralNetworksOperandType forgetGateBiasFw = floatTensor1D;
2872 ANeuralNetworksOperandType cellBiasFw = floatTensor1D;
2873 ANeuralNetworksOperandType outputGateBiasFw = floatTensor1D;
2874 ANeuralNetworksOperandType projWeightsFw = floatTensor2D;
2875 ANeuralNetworksOperandType projBiasFw = floatTensor1D;
2876 ANeuralNetworksOperandType outputStateInFw = floatTensor2D;
2877 ANeuralNetworksOperandType cellStateInFw = floatTensor2D;
2878 ANeuralNetworksOperandType inputToInputBw = floatTensor2D;
2879 ANeuralNetworksOperandType inputToForgetBw = floatTensor2D;
2880 ANeuralNetworksOperandType inputToCellBw = floatTensor2D;
2881 ANeuralNetworksOperandType inputToOutputBw = floatTensor2D;
2882 ANeuralNetworksOperandType recurrentToInputBw = floatTensor2D;
2883 ANeuralNetworksOperandType recurrentToForgetBw = floatTensor2D;
2884 ANeuralNetworksOperandType recurrentToCellBw = floatTensor2D;
2885 ANeuralNetworksOperandType recurrentToOutputBw = floatTensor2D;
2886 ANeuralNetworksOperandType cellToInputBw = floatTensor1D;
2887 ANeuralNetworksOperandType cellToForgetBw = floatTensor1D;
2888 ANeuralNetworksOperandType cellToOutputBw = floatTensor1D;
2889 ANeuralNetworksOperandType inputGateBiasBw = floatTensor1D;
2890 ANeuralNetworksOperandType forgetGateBiasBw = floatTensor1D;
2891 ANeuralNetworksOperandType cellBiasBw = floatTensor1D;
2892 ANeuralNetworksOperandType outputGateBiasBw = floatTensor1D;
2893 ANeuralNetworksOperandType projWeightsBw = floatTensor2D;
2894 ANeuralNetworksOperandType projBiasBw = floatTensor1D;
2895 ANeuralNetworksOperandType outputStateInBw = floatTensor2D;
2896 ANeuralNetworksOperandType cellStateInBw = floatTensor2D;
2897 ANeuralNetworksOperandType auxInput = floatTensor3D;
2898 ANeuralNetworksOperandType auxInputToInputFw = floatTensor2D;
2899 ANeuralNetworksOperandType auxInputToForgetFw = floatTensor2D;
2900 ANeuralNetworksOperandType auxInputToCellFw = floatTensor2D;
2901 ANeuralNetworksOperandType auxInputToOutputFw = floatTensor2D;
2902 ANeuralNetworksOperandType auxInputToInputBw = floatTensor2D;
2903 ANeuralNetworksOperandType auxInputToForgetBw = floatTensor2D;
2904 ANeuralNetworksOperandType auxInputToCellBw = floatTensor2D;
2905 ANeuralNetworksOperandType auxInputToOutputBw = floatTensor2D;
2906 ANeuralNetworksOperandType activation = intScalar;
2907 ANeuralNetworksOperandType clipCellState = floatScalar;
2908 ANeuralNetworksOperandType clipProjLayer = floatScalar;
2909 ANeuralNetworksOperandType mergeOutputs = boolScalar;
2910 ANeuralNetworksOperandType timeMajor = boolScalar;
2911 ANeuralNetworksOperandType inputLayerNormWeightsFw = floatTensor1D;
2912 ANeuralNetworksOperandType forgetLayerNormWeightsFw = floatTensor1D;
2913 ANeuralNetworksOperandType cellLayerNormWeightsFw = floatTensor1D;
2914 ANeuralNetworksOperandType outputLayerNormWeightsFw = floatTensor1D;
2915 ANeuralNetworksOperandType inputLayerNormWeightsBw = floatTensor1D;
2916 ANeuralNetworksOperandType forgetLayerNormWeightsBw = floatTensor1D;
2917 ANeuralNetworksOperandType cellLayerNormWeightsBw = floatTensor1D;
2918 ANeuralNetworksOperandType outputLayerNormWeightsBw = floatTensor1D;
2919
2920 ANeuralNetworksOperandType outputFw = floatTensor2D;
2921 ANeuralNetworksOperandType outputBw = floatTensor2D;
2922
2923 OperationTestBase lstmTest(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM,
2924 {
2925 input,
2926 inputToInputFw,
2927 inputToForgetFw,
2928 inputToCellFw,
2929 inputToOutputFw,
2930 recurrentToInputFw,
2931 recurrentToForgetFw,
2932 recurrentToCellFw,
2933 recurrentToOutputFw,
2934 cellToInputFw,
2935 cellToForgetFw,
2936 cellToOutputFw,
2937 inputGateBiasFw,
2938 forgetGateBiasFw,
2939 cellBiasFw,
2940 outputGateBiasFw,
2941 projWeightsFw,
2942 projBiasFw,
2943 outputStateInFw,
2944 cellStateInFw,
2945 inputToInputBw,
2946 inputToForgetBw,
2947 inputToCellBw,
2948 inputToOutputBw,
2949 recurrentToInputBw,
2950 recurrentToForgetBw,
2951 recurrentToCellBw,
2952 recurrentToOutputBw,
2953 cellToInputBw,
2954 cellToForgetBw,
2955 cellToOutputBw,
2956 inputGateBiasBw,
2957 forgetGateBiasBw,
2958 cellBiasBw,
2959 outputGateBiasBw,
2960 projWeightsBw,
2961 projBiasBw,
2962 outputStateInBw,
2963 cellStateInBw,
2964 auxInput,
2965 auxInputToInputFw,
2966 auxInputToForgetFw,
2967 auxInputToCellFw,
2968 auxInputToOutputFw,
2969 auxInputToInputBw,
2970 auxInputToForgetBw,
2971 auxInputToCellBw,
2972 auxInputToOutputBw,
2973 activation,
2974 clipCellState,
2975 clipProjLayer,
2976 mergeOutputs,
2977 timeMajor,
2978 inputLayerNormWeightsFw,
2979 forgetLayerNormWeightsFw,
2980 cellLayerNormWeightsFw,
2981 outputLayerNormWeightsFw,
2982 inputLayerNormWeightsBw,
2983 forgetLayerNormWeightsBw,
2984 cellLayerNormWeightsBw,
2985 outputLayerNormWeightsBw,
2986 },
2987 {
2988 outputFw,
2989 outputBw,
2990 });
2991
2992 lstmTest.testOpsValidations();
2993 }
2994
2995 TEST(OperationValidationTest, LSTM_BIDIRECTIONAL_SEQUENCE) {
2996 lstmBidirectionalSequence(ANEURALNETWORKS_TENSOR_FLOAT32);
2997 lstmBidirectionalSequence(ANEURALNETWORKS_TENSOR_FLOAT16);
2998 }
2999
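// RANDOM_MULTINOMIAL: a float input tensor, an INT32 sample count, and a 2-element
// seed tensor; the sampled indices are returned as TENSOR_INT32.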
3000 void randomMultinomialOpTest(int32_t operandCode) {
3001 uint32_t inputDims[2] = {5, 5};
3002 ANeuralNetworksOperandType input = {.type = operandCode,
3003 .dimensionCount = 2,
3004 .dimensions = inputDims,
3005 .scale = 0.0f,
3006 .zeroPoint = 0};
3007 ANeuralNetworksOperandType sample_count = {.type = ANEURALNETWORKS_INT32,
3008 .dimensionCount = 0,
3009 .dimensions = nullptr,
3010 .scale = 0.0f,
3011 .zeroPoint = 0};
3012 uint32_t seedDims[1] = {2};
3013 ANeuralNetworksOperandType seed = {.type = ANEURALNETWORKS_TENSOR_INT32,
3014 .dimensionCount = 1,
3015 .dimensions = seedDims,
3016 .scale = 0.0f,
3017 .zeroPoint = 0};
3018 uint32_t outputDims[2] = {5, 7};
3019 ANeuralNetworksOperandType output = {.type = ANEURALNETWORKS_TENSOR_INT32,
3020 .dimensionCount = 2,
3021 .dimensions = outputDims,
3022 .scale = 0.0f,
3023 .zeroPoint = 0};
3024
3025 OperationTestBase multinomialTest(ANEURALNETWORKS_RANDOM_MULTINOMIAL,
3026 {input, sample_count, seed}, {output});
3027 multinomialTest.testOpsValidations();
3028 }
3029
3030 TEST(OperationValidationTest, RANDOM_MULTINOMIAL_float16) {
3031 randomMultinomialOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3032 }
3033
3034 TEST(OperationValidationTest, RANDOM_MULTINOMIAL_float32) {
3035 randomMultinomialOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3036 }
3037
3038 TEST(OperationValidationTest, RNN_float16) {
3039 uint32_t oneDimensional[1] = {5};
3040 uint32_t twoDimensional[2] = {5, 5};
3041 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3042 .dimensionCount = 1,
3043 .dimensions = oneDimensional,
3044 .scale = 0.0f,
3045 .zeroPoint = 0};
3046 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3047 .dimensionCount = 2,
3048 .dimensions = twoDimensional,
3049 .scale = 0.0f,
3050 .zeroPoint = 0};
3051 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3052 .dimensionCount = 0,
3053 .dimensions = nullptr,
3054 .scale = 0.0f,
3055 .zeroPoint = 0};
3056
3057 ANeuralNetworksOperandType input = floatTensor2D;
3058 ANeuralNetworksOperandType weights = floatTensor2D;
3059 ANeuralNetworksOperandType recurrentWeights = floatTensor2D;
3060 ANeuralNetworksOperandType bias = floatTensor1D;
3061 ANeuralNetworksOperandType hiddenStateIn = floatTensor2D;
3062 ANeuralNetworksOperandType activation = intScalar;
3063
3064 ANeuralNetworksOperandType hiddenStateOut = floatTensor2D;
3065 ANeuralNetworksOperandType output = floatTensor2D;
3066
3067 OperationTestBase rnnTest(ANEURALNETWORKS_RNN,
3068 {input, weights, recurrentWeights, bias, hiddenStateIn, activation},
3069 {hiddenStateOut, output});
3070 rnnTest.testOpsValidations();
3071 }
3072
3073 TEST(OperationValidationTest, RNN_float32) {
3074 uint32_t oneDimensional[1] = {5};
3075 uint32_t twoDimensional[2] = {5, 5};
3076 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3077 .dimensionCount = 1,
3078 .dimensions = oneDimensional,
3079 .scale = 0.0f,
3080 .zeroPoint = 0};
3081 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3082 .dimensionCount = 2,
3083 .dimensions = twoDimensional,
3084 .scale = 0.0f,
3085 .zeroPoint = 0};
3086 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3087 .dimensionCount = 0,
3088 .dimensions = nullptr,
3089 .scale = 0.0f,
3090 .zeroPoint = 0};
3091
3092 ANeuralNetworksOperandType input = floatTensor2D;
3093 ANeuralNetworksOperandType weights = floatTensor2D;
3094 ANeuralNetworksOperandType recurrentWeights = floatTensor2D;
3095 ANeuralNetworksOperandType bias = floatTensor1D;
3096 ANeuralNetworksOperandType hiddenStateIn = floatTensor2D;
3097 ANeuralNetworksOperandType activation = intScalar;
3098
3099 ANeuralNetworksOperandType hiddenStateOut = floatTensor2D;
3100 ANeuralNetworksOperandType output = floatTensor2D;
3101
3102 OperationTestBase rnnTest(ANEURALNETWORKS_RNN,
3103 {input, weights, recurrentWeights, bias, hiddenStateIn, activation},
3104 {hiddenStateOut, output});
3105 rnnTest.testOpsValidations();
3106 }
3107
3108 TEST(OperationValidationTest, SVDF_float32) {
3109 uint32_t oneDimensional[1] = {5};
3110 uint32_t twoDimensional[2] = {5, 5};
3111 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3112 .dimensionCount = 1,
3113 .dimensions = oneDimensional,
3114 .scale = 0.0f,
3115 .zeroPoint = 0};
3116 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3117 .dimensionCount = 2,
3118 .dimensions = twoDimensional,
3119 .scale = 0.0f,
3120 .zeroPoint = 0};
3121 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3122 .dimensionCount = 0,
3123 .dimensions = nullptr,
3124 .scale = 0.0f,
3125 .zeroPoint = 0};
3126
3127 ANeuralNetworksOperandType input = floatTensor2D;
3128 ANeuralNetworksOperandType weightsFeature = floatTensor2D;
3129 ANeuralNetworksOperandType weightsTime = floatTensor2D;
3130 ANeuralNetworksOperandType bias = floatTensor1D;
3131 ANeuralNetworksOperandType stateIn = floatTensor2D;
3132 ANeuralNetworksOperandType rank = intScalar;
3133 ANeuralNetworksOperandType activation = intScalar;
3134
3135 ANeuralNetworksOperandType stateOut = floatTensor2D;
3136 ANeuralNetworksOperandType output = floatTensor2D;
3137
3138 OperationTestBase svdfTest(
3139 ANEURALNETWORKS_SVDF,
3140 {input, weightsFeature, weightsTime, bias, stateIn, rank, activation},
3141 {stateOut, output});
3142 svdfTest.testOpsValidations();
3143 }
3144
3145 TEST(OperationValidationTest, SVDF_float16) {
3146 uint32_t oneDimensional[1] = {5};
3147 uint32_t twoDimensional[2] = {5, 5};
3148 ANeuralNetworksOperandType floatTensor1D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3149 .dimensionCount = 1,
3150 .dimensions = oneDimensional,
3151 .scale = 0.0f,
3152 .zeroPoint = 0};
3153 ANeuralNetworksOperandType floatTensor2D = {.type = ANEURALNETWORKS_TENSOR_FLOAT16,
3154 .dimensionCount = 2,
3155 .dimensions = twoDimensional,
3156 .scale = 0.0f,
3157 .zeroPoint = 0};
3158 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
3159 .dimensionCount = 0,
3160 .dimensions = nullptr,
3161 .scale = 0.0f,
3162 .zeroPoint = 0};
3163
3164 ANeuralNetworksOperandType input = floatTensor2D;
3165 ANeuralNetworksOperandType weightsFeature = floatTensor2D;
3166 ANeuralNetworksOperandType weightsTime = floatTensor2D;
3167 ANeuralNetworksOperandType bias = floatTensor1D;
3168 ANeuralNetworksOperandType stateIn = floatTensor2D;
3169 ANeuralNetworksOperandType rank = intScalar;
3170 ANeuralNetworksOperandType activation = intScalar;
3171
3172 ANeuralNetworksOperandType stateOut = floatTensor2D;
3173 ANeuralNetworksOperandType output = floatTensor2D;
3174
3175 OperationTestBase svdfTest(
3176 ANEURALNETWORKS_SVDF,
3177 {input, weightsFeature, weightsTime, bias, stateIn, rank, activation},
3178 {stateOut, output});
3179 svdfTest.testOpsValidations();
3180 }
3181
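// STRIDED_SLICE: begins/ends/strides are TENSOR_INT32 vectors and the three masks
// are INT32 scalars; the input rank is constrained to at most 4.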
3182 void stridedSliceOpTest(int32_t operandCode) {
3183 uint32_t inputDimensions[2] = {5, 5};
3184 ANeuralNetworksOperandType input = getOpType(operandCode, 2, inputDimensions);
3185 ANeuralNetworksOperandType output = input;
3186
3187 uint32_t beginsDimensions[1] = {2};
3188 ANeuralNetworksOperandType begins = {.type = ANEURALNETWORKS_TENSOR_INT32,
3189 .dimensionCount = 1,
3190 .dimensions = beginsDimensions,
3191 .scale = 0.0f,
3192 .zeroPoint = 0};
3193
3194 ANeuralNetworksOperandType ends = begins;
3195 ANeuralNetworksOperandType strides = begins;
3196
3197 ANeuralNetworksOperandType beginMask = {.type = ANEURALNETWORKS_INT32,
3198 .dimensionCount = 0,
3199 .dimensions = nullptr,
3200 .scale = 0.0f,
3201 .zeroPoint = 0};
3202 ANeuralNetworksOperandType endMask = beginMask;
3203 ANeuralNetworksOperandType shrinkAxisMask = beginMask;
3204
3205 OperationTestBase stridedSliceTest(
3206 ANEURALNETWORKS_STRIDED_SLICE,
3207 {input, begins, ends, strides, beginMask, endMask, shrinkAxisMask}, {output},
3208 {{TensorRankConstraint::UpTo(4)}});
3209 stridedSliceTest.testOpsValidations();
3210 }
3211
3212 TEST(OperationValidationTest, STRIDED_SLICE_float32) {
3213 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3214 }
3215
3216 TEST(OperationValidationTest, STRIDED_SLICE_float16) {
3217 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3218 }
3219
3220 TEST(OperationValidationTest, STRIDED_SLICE_quant8) {
3221 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3222 }
3223
3224 TEST(OperationValidationTest, STRIDED_SLICE_quant8_signed) {
3225 stridedSliceOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3226 }
3227
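// ROI_ALIGN: feature map, ROI boxes, and a batch-split tensor, followed by output
// size, scale, and sampling-ratio scalars and the data-layout flag.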
3228 void roiAlignOpTest(int32_t inputOperandCode, int32_t roiOperandCode, int32_t scalarOperandCode) {
3229 uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, batchSplitDim[] = {1};
3230 uint32_t outDim[] = {4, 2, 2, 1};
3231 OperationTestBase roiAlignTest(
3232 ANEURALNETWORKS_ROI_ALIGN,
3233 {getOpType(inputOperandCode, 4, inDim), getOpType(roiOperandCode, 2, roiDim),
3234 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, batchSplitDim),
3235 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3236 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
3237 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3238 getOpType(ANEURALNETWORKS_BOOL)},
3239 {getOpType(inputOperandCode, 4, outDim)});
3240 roiAlignTest.testOpsValidations();
3241 }
3242
3243 TEST(OperationValidationTest, ROI_ALIGN_float16) {
3244 roiAlignOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
3245 ANEURALNETWORKS_FLOAT16);
3246 }
3247
3248 TEST(OperationValidationTest, ROI_ALIGN_float32) {
3249 roiAlignOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
3250 ANEURALNETWORKS_FLOAT32);
3251 }
3252
3253 TEST(OperationValidationTest, ROI_ALIGN_quant8) {
3254 roiAlignOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3255 ANEURALNETWORKS_FLOAT32);
3256 }
3257
3258 TEST(OperationValidationTest, ROI_ALIGN_quant8signed) {
3259 roiAlignOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3260 ANEURALNETWORKS_FLOAT32);
3261 }
3262
3263 void roiPoolingOpTest(int32_t inputOperandCode, int32_t roiOperandCode, int32_t scalarOperandCode) {
3264 uint32_t inDim[] = {1, 4, 4, 1}, roiDim[] = {4, 4}, batchSplitDim[] = {1};
3265 uint32_t outDim[] = {4, 2, 2, 1};
3266 OperationTestBase roiPoolingTest(
3267 ANEURALNETWORKS_ROI_POOLING,
3268 {getOpType(inputOperandCode, 4, inDim), getOpType(roiOperandCode, 2, roiDim),
3269 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, batchSplitDim),
3270 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3271 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
3272 getOpType(ANEURALNETWORKS_BOOL)},
3273 {getOpType(inputOperandCode, 4, outDim)});
3274 roiPoolingTest.testOpsValidations();
3275 }
3276
3277 TEST(OperationValidationTest, ROI_POOLING_float16) {
3278 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
3279 ANEURALNETWORKS_FLOAT16);
3280 }
3281
3282 TEST(OperationValidationTest, ROI_POOLING_float32) {
3283 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
3284 ANEURALNETWORKS_FLOAT32);
3285 }
3286
3287 TEST(OperationValidationTest, ROI_POOLING_quant8) {
3288 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3289 ANEURALNETWORKS_FLOAT32);
3290 }
3291
3292 TEST(OperationValidationTest, ROI_POOLING_quant8signed) {
3293 roiPoolingOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3294 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
3295 }
3296
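// HEATMAP_MAX_KEYPOINT: keypoint heatmaps and boxes in, per-keypoint scores and
// coordinates out; the box and coordinate tensors share the roiOperandCode type.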
3297 void heatmapMaxKeypointOpTest(int32_t heatmapOperandCode, int32_t roiOperandCode) {
3298 uint32_t heatmapDim[] = {6, 4, 4, 1}, boxDim[] = {6, 4}, outScoreDim[] = {6, 1},
3299 outKeypointDim[] = {6, 1, 2};
3300 OperationTestBase heatmapMaxKeypointTest(
3301 ANEURALNETWORKS_HEATMAP_MAX_KEYPOINT,
3302 {getOpType(heatmapOperandCode, 4, heatmapDim), getOpType(roiOperandCode, 2, boxDim),
3303 getOpType(ANEURALNETWORKS_BOOL)},
3304 {getOpType(heatmapOperandCode, 2, outScoreDim),
3305 getOpType(roiOperandCode, 3, outKeypointDim)});
3306 heatmapMaxKeypointTest.testOpsValidations();
3307 }
3308
3309 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_float16) {
3310 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3311 }
3312
3313 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_float32) {
3314 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3315 }
3316
3317 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_quant) {
3318 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3319 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM);
3320 }
3321
3322 TEST(OperationValidationTest, HEATMAP_MAX_KEYPOINT_quant_signed) {
3323 heatmapMaxKeypointOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3324 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM);
3325 }
3326
3327 void instanceNormalizationOpTest(int32_t inputOperandType) {
3328 SCOPED_TRACE(inputOperandType);
3329 uint32_t inputDims[4] = {4, 4, 4, 4};
3330 ANeuralNetworksOperandType input = getOpType(inputOperandType, 4, inputDims);
3331 ANeuralNetworksOperandType floatScalar = getOpType(ANEURALNETWORKS_FLOAT32);
3332 if (inputOperandType == ANEURALNETWORKS_TENSOR_FLOAT16) {
3333 floatScalar = getOpType(ANEURALNETWORKS_FLOAT16);
3334 }
3335 ANeuralNetworksOperandType gamma = floatScalar;
3336 ANeuralNetworksOperandType beta = floatScalar;
3337 ANeuralNetworksOperandType epsilon = floatScalar;
3338 ANeuralNetworksOperandType isNCHW = getOpType(ANEURALNETWORKS_BOOL);
3339 ANeuralNetworksOperandType output = input;
3340
3341 OperationTestBase test(ANEURALNETWORKS_INSTANCE_NORMALIZATION,
3342 {input, gamma, beta, epsilon, isNCHW}, {output});
3343 test.testOpsValidations();
3344 }
3345
3346 TEST(OperationValidationTest, INSTANCE_NORMALIZATION) {
3347 instanceNormalizationOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3348 instanceNormalizationOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3349 }
3350
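// GROUPED_CONV_2D: the bias type follows the filter type (TENSOR_INT32 bias for
// quantized filters, with per-channel quant params attached when applicable).
// Both the explicit-padding and implicit-padding signatures are validated.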
3351 void groupedConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
3352 uint32_t inDim[] = {1, 3, 3, 2}, filterDim[] = {2, 2, 2, 1}, biasDim[] = {2};
3353 uint32_t outDim[] = {1, 2, 2, 2};
3354 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inDim);
3355
3356 float filterScales[2] = {0.5f, 1.0f};
3357 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, filterDim);
3358
3359 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
3360 .channelDim = 0,
3361 .scaleCount = 2,
3362 .scales = filterScales,
3363 };
3364
3365 ANeuralNetworksOperandType bias = getOpType(inputOperandCode, 1, biasDim);
3366 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
3367 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
3368 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3369 bias.scale = 0.25f;
3370 }
3371 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3372 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3373 bias.scale = 0.0f;
3374 }
3375
3376 ANeuralNetworksOperandType scalar = getOpType(ANEURALNETWORKS_INT32);
3377 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
3378
3379 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outDim);
3380
3381 OperationTestBase explicitGroupedConvTest(ANEURALNETWORKS_GROUPED_CONV_2D,
3382 {input, filter, bias, scalar, scalar, scalar, scalar,
3383 scalar, scalar, scalar, scalar, layout},
3384 {output});
3385 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3386 explicitGroupedConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3387 }
3388 explicitGroupedConvTest.testOpsValidations();
3389
3390 OperationTestBase implicitGroupedConvTest(
3391 ANEURALNETWORKS_GROUPED_CONV_2D,
3392 {input, filter, bias, scalar, scalar, scalar, scalar, scalar, layout}, {output});
3393 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3394 implicitGroupedConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3395 }
3396 implicitGroupedConvTest.testOpsValidations();
3397 }
3398
3399 TEST(OperationValidationTest, GROUPED_CONV_2D_float16) {
3400 groupedConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3401 }
3402
3403 TEST(OperationValidationTest, GROUPED_CONV_2D_float32) {
3404 groupedConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3405 }
3406
3407 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8) {
3408 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3409 }
3410
3411 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8_per_channel) {
3412 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3413 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3414 }
3415
3416 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8signed) {
3417 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3418 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3419 }
3420
3421 TEST(OperationValidationTest, GROUPED_CONV_2D_quant8signed_per_channel) {
3422 groupedConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3423 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3424 }
3425
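// TRANSPOSE_CONV_2D: validated once with explicit padding scalars and once with an
// output-shape tensor plus implicit padding; bias typing mirrors GROUPED_CONV_2D.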
3426 void transposeConvOpTest(int32_t inputOperandCode, int32_t filterOperandCode) {
3427 uint32_t inDim[] = {1, 2, 2, 2}, filterDim[] = {2, 3, 3, 1}, biasDim[] = {2};
3428 uint32_t outDim[] = {1, 5, 5, 2}, outShapeDim[] = {4};
3429 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 4, inDim);
3430 ANeuralNetworksOperandType filter = getOpType(filterOperandCode, 4, filterDim);
3431
3432 float filterScales[2] = {0.5f, 1.0f};
3433 ANeuralNetworksSymmPerChannelQuantParams filterChannelQuantParams = {
3434 .channelDim = 0,
3435 .scaleCount = 2,
3436 .scales = filterScales,
3437 };
3438
3439 ANeuralNetworksOperandType bias = getOpType(inputOperandCode, 1, biasDim);
3440 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM ||
3441 filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED) {
3442 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3443 bias.scale = 0.25f;
3444 }
3445 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3446 bias.type = ANEURALNETWORKS_TENSOR_INT32;
3447 bias.scale = 0.0f;
3448 }
3449
3450 ANeuralNetworksOperandType scalar = getOpType(ANEURALNETWORKS_INT32);
3451 ANeuralNetworksOperandType layout = getOpType(ANEURALNETWORKS_BOOL);
3452 ANeuralNetworksOperandType output = getOpType(inputOperandCode, 4, outDim);
3453
3454 OperationTestBase explicitTransposeConvTest(
3455 ANEURALNETWORKS_TRANSPOSE_CONV_2D,
3456 {input, filter, bias, scalar, scalar, scalar, scalar, scalar, scalar, scalar, layout},
3457 {output});
3458 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3459 explicitTransposeConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3460 }
3461 explicitTransposeConvTest.testOpsValidations();
3462
3463 OperationTestBase implicitTransposeConvTest(
3464 ANEURALNETWORKS_TRANSPOSE_CONV_2D,
3465 {input, filter, bias, getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outShapeDim), scalar,
3466 scalar, scalar, scalar, layout},
3467 {output});
3468 if (filterOperandCode == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
3469 implicitTransposeConvTest.setInputSymmPerChannelQuantParams(1, filterChannelQuantParams);
3470 }
3471 implicitTransposeConvTest.testOpsValidations();
3472 }
3473
3474 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_float16) {
3475 transposeConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3476 }
3477
3478 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_float32) {
3479 transposeConvOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3480 }
3481
3482 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8) {
3483 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3484 }
3485
3486 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_per_channel) {
3487 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3488 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3489 }
3490
3491 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_signed) {
3492 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3493 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3494 }
3495
3496 TEST(OperationValidationTest, TRANSPOSE_CONV_2D_quant8_signed_per_channel) {
3497 transposeConvOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3498 ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);
3499 }
3500
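// CHANNEL_SHUFFLE: the two INT32 scalars are the group count and the axis; the
// input rank is constrained to at most 4.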
3501 void channelShuffleOpTest(int32_t operandCode) {
3502 uint32_t inoutDim[] = {2, 2, 3, 12};
3503 OperationTestBase channelShuffleTest(
3504 ANEURALNETWORKS_CHANNEL_SHUFFLE,
3505 {getOpType(operandCode, 2, inoutDim), getOpType(ANEURALNETWORKS_INT32),
3506 getOpType(ANEURALNETWORKS_INT32)},
3507 {getOpType(operandCode, 2, inoutDim)}, {{TensorRankConstraint::UpTo(4)}});
3508 channelShuffleTest.testOpsValidations();
3509 }
3510
3511 TEST(OperationValidationTest, CHANNEL_SHUFFLE_float16) {
3512 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3513 }
3514
3515 TEST(OperationValidationTest, CHANNEL_SHUFFLE_float32) {
3516 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3517 }
3518
3519 TEST(OperationValidationTest, CHANNEL_SHUFFLE_quant8) {
3520 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3521 }
3522
3523 TEST(OperationValidationTest, CHANNEL_SHUFFLE_quant8signed) {
3524 channelShuffleOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3525 }
3526
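// DETECTION_POSTPROCESSING: class scores, box deltas, and anchors plus NMS control
// scalars; the float scalars switch to FLOAT16 when the tensors are FLOAT16.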
3527 void detectionPostprocessingOpTest(int32_t inputOperandCode) {
3528 SCOPED_TRACE(inputOperandCode);
3529 const int numBatches = 2;
3530 const int numAnchors = 10;
3531 const int numClasses = 5;
3532 const int lengthBoxEncoding = 4;
3533
3534 uint32_t inputDims[3] = {numBatches, numAnchors, numClasses};
3535 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDims);
3536 uint32_t deltasDims[3] = {numBatches, numAnchors, lengthBoxEncoding};
3537 ANeuralNetworksOperandType deltas = getOpType(inputOperandCode, 3, deltasDims);
3538 uint32_t anchorsDims[2] = {numAnchors, 4};
3539 ANeuralNetworksOperandType anchors = getOpType(inputOperandCode, 2, anchorsDims);
3540 ANeuralNetworksOperandType scaleScalar = getOpType(ANEURALNETWORKS_FLOAT32);
3541 if (inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT16) {
3542 scaleScalar = getOpType(ANEURALNETWORKS_FLOAT16);
3543 }
3544 ANeuralNetworksOperandType isRegularNMS = getOpType(ANEURALNETWORKS_BOOL);
3545 ANeuralNetworksOperandType maxNumDetections = getOpType(ANEURALNETWORKS_INT32);
3546 ANeuralNetworksOperandType numOfClassesPerDetection = maxNumDetections;
3547 ANeuralNetworksOperandType numOfDetections = numOfClassesPerDetection;
3548 ANeuralNetworksOperandType scoreThreshold = scaleScalar;
3549 ANeuralNetworksOperandType iouThreshold = scaleScalar;
3550 ANeuralNetworksOperandType includeBackground = getOpType(ANEURALNETWORKS_BOOL);
3551 // Outputs
3552 const int maxNumDetectionsValue = 5;
3553 uint32_t outputScoreDims[2] = {numBatches, maxNumDetectionsValue};
3554 ANeuralNetworksOperandType outputScore = getOpType(inputOperandCode, 2, outputScoreDims);
3555 uint32_t boundingBoxesDims[3] = {numBatches, maxNumDetectionsValue, 4};
3556 ANeuralNetworksOperandType boundingBoxes = getOpType(inputOperandCode, 3, boundingBoxesDims);
3557 ANeuralNetworksOperandType classLabel =
3558 getOpType(ANEURALNETWORKS_TENSOR_INT32, 2, outputScoreDims);
3559 uint32_t numValidDims[1] = {numBatches};
3560 ANeuralNetworksOperandType numValid = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, numValidDims);
3561
3562 OperationTestBase test(ANEURALNETWORKS_DETECTION_POSTPROCESSING,
3563 {input, deltas, anchors, scaleScalar, scaleScalar, scaleScalar,
3564 scaleScalar, isRegularNMS, maxNumDetections, numOfClassesPerDetection,
3565 numOfDetections, scoreThreshold, iouThreshold, includeBackground},
3566 {outputScore, boundingBoxes, classLabel, numValid});
3567 test.testOpsValidations();
3568 }
3569
3570 TEST(OperationValidationTest, DETECTION_POSTPROCESSING) {
3571 detectionPostprocessingOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3572 detectionPostprocessingOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3573 }
3574
3575 void preluOpTest(int32_t operandCode) {
3576 uint32_t inoutDim[] = {1, 2, 2, 3}, alphaDim[] = {1, 1, 3};
3577 OperationTestBase preluTest(
3578 ANEURALNETWORKS_PRELU,
3579 {getOpType(operandCode, 4, inoutDim), getOpType(operandCode, 3, alphaDim)},
3580 {getOpType(operandCode, 4, inoutDim)});
3581 preluTest.testOpsValidations();
3582 }
3583
3584 TEST(OperationValidationTest, PRELU_float16) {
3585 preluOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3586 }
3587
3588 TEST(OperationValidationTest, PRELU_float32) {
3589 preluOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3590 }
3591
3592 TEST(OperationValidationTest, PRELU_quant8) {
3593 preluOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3594 }
3595
3596 TEST(OperationValidationTest, PRELU_quant8signed) {
3597 preluOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3598 }
3599
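// Normalization ops are validated both without and with the optional INT32 axis
// scalar appended to the inputs.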
3600 void normalizationOpTest(ANeuralNetworksOperationType operationCode, int32_t operandCode) {
3601 uint32_t inputDim[] = {2, 2, 2, 2};
3602 OperationTestBase normalizationTest(operationCode, {getOpType(operandCode, 4, inputDim)},
3603 {getOpType(operandCode, 4, inputDim)});
3604 normalizationTest.testOpsValidations();
3605
3606 OperationTestBase normalizationAxisTest(
3607 operationCode, {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32)},
3608 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4)}});
3609 normalizationAxisTest.testOpsValidations();
3610 }
3611
3612 TEST(OperationValidationTest, L2_NORMALIZATION_float16) {
3613 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_FLOAT16);
3614 }
3615
3616 TEST(OperationValidationTest, L2_NORMALIZATION_float32) {
3617 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_FLOAT32);
3618 }
3619
3620 TEST(OperationValidationTest, L2_NORMALIZATION_quant8) {
3621 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3622 }
3623
3624 TEST(OperationValidationTest, L2_NORMALIZATION_quant8_signed) {
3625 normalizationOpTest(ANEURALNETWORKS_L2_NORMALIZATION,
3626 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3627 }
3628
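// LOCAL_RESPONSE_NORMALIZATION: an INT32 radius plus three float scalars whose type
// matches the tensor precision; also validated with the optional axis input.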
3629 void localResponseNormOpTest(int32_t operandCode) {
3630 int32_t floatScalarType = (operandCode == ANEURALNETWORKS_TENSOR_FLOAT32)
3631 ? ANEURALNETWORKS_FLOAT32
3632 : ANEURALNETWORKS_FLOAT16;
3633 uint32_t inputDim[] = {2, 2, 2, 6};
3634 OperationTestBase lrnTest(
3635 ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
3636 {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32),
3637 getOpType(floatScalarType), getOpType(floatScalarType), getOpType(floatScalarType)},
3638 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4), {0}}});
3639 lrnTest.testOpsValidations();
3640
3641 OperationTestBase lrnAxisTest(
3642 ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION,
3643 {getOpType(operandCode, 4, inputDim), getOpType(ANEURALNETWORKS_INT32),
3644 getOpType(floatScalarType), getOpType(floatScalarType), getOpType(floatScalarType),
3645 getOpType(ANEURALNETWORKS_INT32)},
3646 {getOpType(operandCode, 4, inputDim)}, {{TensorRankConstraint::UpTo(4), {0}}});
3647 lrnAxisTest.testOpsValidations();
3648 }
3649
3650 TEST(OperationValidationTest, LOCAL_RESPONSE_NORMALIZATION_float16) {
3651 localResponseNormOpTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3652 }
3653
3654 TEST(OperationValidationTest, LOCAL_RESPONSE_NORMALIZATION_float32) {
3655 localResponseNormOpTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3656 }
3657
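// AXIS_ALIGNED_BBOX_TRANSFORM: ROIs, box deltas, a batch-split index tensor, and
// per-batch image info; the output boxes share the ROI tensor type.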
3658 void axisAlignedBBoxTransformOpTest(int32_t roiOperandCode, int32_t deltaOperandCode) {
3659 uint32_t roiDim[] = {5, 4}, deltaDim[] = {5, 8}, bsDim[] = {5}, imageDim[] = {5, 2};
3660 uint32_t outDim[] = {5, 8};
3661 OperationTestBase axisAlignedBBoxTransformTest(
3662 ANEURALNETWORKS_AXIS_ALIGNED_BBOX_TRANSFORM,
3663 {getOpType(roiOperandCode, 2, roiDim), getOpType(deltaOperandCode, 2, deltaDim),
3664 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, bsDim),
3665 getOpType(roiOperandCode, 2, imageDim)},
3666 {getOpType(roiOperandCode, 2, outDim)});
3667 axisAlignedBBoxTransformTest.testOpsValidations();
3668 }
3669
3670 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_float16) {
3671 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
3672 }
3673
3674 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_float32) {
3675 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
3676 }
3677
3678 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_quant) {
3679 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3680 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3681 }
3682
3683 TEST(OperationValidationTest, AXIS_ALIGNED_BBOX_TRANSFORM_quant_signed) {
3684 axisAlignedBBoxTransformOpTest(ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3685 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3686 }
3687
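// SLICE: begin and size are TENSOR_INT32 vectors whose length matches the input rank.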
3688 void sliceTest(int32_t operandCode) {
3689 uint32_t inputDim[] = {3, 3, 3};
3690 uint32_t startDim[] = {3};
3691 uint32_t sizeDim[] = {3};
3692 uint32_t outputDim[] = {1, 2, 3};
3693
3694 OperationTestBase sliceTest(ANEURALNETWORKS_SLICE,
3695 {getOpType(operandCode, 3, inputDim),
3696 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, startDim),
3697 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, sizeDim)},
3698 {getOpType(operandCode, 3, outputDim)});
3699 sliceTest.testOpsValidations();
3700 }
3701
3702 TEST(OperationValidationTest, SLICE_float32) {
3703 sliceTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3704 }
3705 TEST(OperationValidationTest, SLICE_int32) {
3706 sliceTest(ANEURALNETWORKS_TENSOR_INT32);
3707 }
3708 TEST(OperationValidationTest, SLICE_uint8) {
3709 sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3710 }
3711 TEST(OperationValidationTest, SLICE_int8) {
3712 sliceTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3713 }
3714 TEST(OperationValidationTest, SLICE_float16) {
3715 sliceTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3716 }
3717
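// LOGICAL_AND / LOGICAL_OR operate element-wise on TENSOR_BOOL8 inputs and output.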
3718 void logicalTest(ANeuralNetworksOperationType operationCode) {
3719 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3720 ANeuralNetworksOperandType input1 = {.type = ANEURALNETWORKS_TENSOR_BOOL8,
3721 .dimensionCount = 4,
3722 .dimensions = inputDimensions,
3723 .scale = 0.0f,
3724 .zeroPoint = 0};
3725 ANeuralNetworksOperandType input2 = input1;
3726 ANeuralNetworksOperandType output = input1;
3727
3728 OperationTestBase test(operationCode, {input1, input2}, {output});
3729 test.testOpsValidations();
3730 }
3731
3732 TEST(OperationValidationTest, LOGICAL_AND) {
3733 logicalTest(ANEURALNETWORKS_LOGICAL_AND);
3734 }
3735
3736 TEST(OperationValidationTest, LOGICAL_OR) {
3737 logicalTest(ANEURALNETWORKS_LOGICAL_OR);
3738 }
3739
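// Comparison ops accept any supported input type but always produce a TENSOR_BOOL8
// output of the same shape.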
3740 void comparisonTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
3741 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3742 ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
3743 ANeuralNetworksOperandType input2 = input1;
3744 ANeuralNetworksOperandType output = {.type = ANEURALNETWORKS_TENSOR_BOOL8,
3745 .dimensionCount = 4,
3746 .dimensions = inputDimensions,
3747 .scale = 0.0f,
3748 .zeroPoint = 0};
3749 OperationTestBase test(operationCode, {input1, input2}, {output});
3750 test.testOpsValidations();
3751 }
3752
3753 TEST(OperationValidationTest, LESS) {
3754 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_BOOL8);
3755 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT16);
3756 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_FLOAT32);
3757 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_INT32);
3758 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3759 comparisonTest(ANEURALNETWORKS_LESS, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3760 }
3761
3762 TEST(OperationValidationTest, LESS_EQUAL) {
3763 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3764 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3765 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3766 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3767 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3768 comparisonTest(ANEURALNETWORKS_LESS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3769 }
3770
3771 TEST(OperationValidationTest, EQUAL) {
3772 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3773 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3774 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3775 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3776 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3777 comparisonTest(ANEURALNETWORKS_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3778 }
3779
3780 TEST(OperationValidationTest, NOT_EQUAL) {
3781 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3782 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3783 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3784 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3785 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3786 comparisonTest(ANEURALNETWORKS_NOT_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3787 }
3788
3789 TEST(OperationValidationTest, GREATER) {
3790 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_BOOL8);
3791 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT16);
3792 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_FLOAT32);
3793 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_INT32);
3794 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3795 comparisonTest(ANEURALNETWORKS_GREATER, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3796 }
3797
3798 TEST(OperationValidationTest, GREATER_EQUAL) {
3799 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_BOOL8);
3800 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT16);
3801 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_FLOAT32);
3802 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_INT32);
3803 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3804 comparisonTest(ANEURALNETWORKS_GREATER_EQUAL, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3805 }
3806
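// Validates a reduction operation: a data tensor, a 1-D TENSOR_INT32 axes tensor, and a scalar
// BOOL keep_dims flag, with the input rank limited to 4.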
3807 void reduceOpTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
3808 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3809 ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
3810 uint32_t axesDimensions[1] = {2};
3811 ANeuralNetworksOperandType input2 = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, axesDimensions);
3812 ANeuralNetworksOperandType input3 = getOpType(ANEURALNETWORKS_BOOL, 0);
3813 ANeuralNetworksOperandType output = getOpType(inputOperandType, 4, inputDimensions);
3814 OperationTestBase test(operationCode, {input1, input2, input3}, {output},
3815 {{TensorRankConstraint::UpTo(4)}});
3816 test.testOpsValidations();
3817 }
3818
3819 TEST(OperationValidationTest, REDUCE_PROD) {
3820 reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT16);
3821 reduceOpTest(ANEURALNETWORKS_REDUCE_PROD, ANEURALNETWORKS_TENSOR_FLOAT32);
3822 }
3823
3824 TEST(OperationValidationTest, REDUCE_SUM) {
3825 reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT16);
3826 reduceOpTest(ANEURALNETWORKS_REDUCE_SUM, ANEURALNETWORKS_TENSOR_FLOAT32);
3827 }
3828
3829 TEST(OperationValidationTest, REDUCE_MAX) {
3830 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT16);
3831 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_FLOAT32);
3832 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3833 reduceOpTest(ANEURALNETWORKS_REDUCE_MAX, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3834 }
3835
3836 TEST(OperationValidationTest, REDUCE_MIN) {
3837 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT16);
3838 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_FLOAT32);
3839 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3840 reduceOpTest(ANEURALNETWORKS_REDUCE_MIN, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3841 }
3842
3843 TEST(OperationValidationTest, REDUCE_ANY) {
3844 reduceOpTest(ANEURALNETWORKS_REDUCE_ANY, ANEURALNETWORKS_TENSOR_BOOL8);
3845 }
3846
3847 TEST(OperationValidationTest, REDUCE_ALL) {
3848 reduceOpTest(ANEURALNETWORKS_REDUCE_ALL, ANEURALNETWORKS_TENSOR_BOOL8);
3849 }
3850
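// Validates SELECT: a TENSOR_BOOL8 condition tensor and two value tensors of the given type,
// producing an output of that type.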
3851 void selectTest(ANeuralNetworksOperationType operationCode, int32_t inputOperandType) {
3852 uint32_t inputDimensions[4] = {2, 2, 2, 2};
3853 ANeuralNetworksOperandType input0 = getOpType(ANEURALNETWORKS_TENSOR_BOOL8, 4, inputDimensions);
3854 ANeuralNetworksOperandType input1 = getOpType(inputOperandType, 4, inputDimensions);
3855 ANeuralNetworksOperandType input2 = input1;
3856 ANeuralNetworksOperandType output = input1;
3857
3858 OperationTestBase test(operationCode, {input0, input1, input2}, {output});
3859 test.testOpsValidations();
3860 }
3861
3862 TEST(OperationValidationTest, SELECT) {
3863 selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_FLOAT16);
3864 selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_FLOAT32);
3865 selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_INT32);
3866 selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
3867 selectTest(ANEURALNETWORKS_SELECT, ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
3868 }
3869
3870 void powTest(int32_t inputOperandType) {
3871 const uint32_t inputDimensions[] = {3, 3};
3872 ANeuralNetworksOperandType inputType = {.type = inputOperandType,
3873 .dimensionCount = 2,
3874 .dimensions = inputDimensions,
3875 .scale = 0.0f,
3876 .zeroPoint = 0};
3877
3878 OperationTestBase test(ANEURALNETWORKS_POW, {inputType, inputType}, {inputType});
3879 test.testOpsValidations();
3880 }
3881
3882 TEST(OperationValidationTest, POW) {
3883 powTest(ANEURALNETWORKS_TENSOR_FLOAT16);
3884 powTest(ANEURALNETWORKS_TENSOR_FLOAT32);
3885 }
3886
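// Validates BOX_WITH_NMS_LIMIT: score, ROI, and batch-split tensors plus scalar NMS parameters,
// producing filtered scores, ROIs, class indices, and batch splits.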
3887 void boxWithNmsLimitOpTest(int32_t scoreOperandCode, int32_t roiOperandCode,
3888 int32_t scalarOperandCode) {
3889 uint32_t scoreDim[] = {19, 3}, roiDim[] = {19, 12}, splitDim[] = {2};
3890 uint32_t outScoreDim[] = {12}, outRoiDim[] = {12, 4}, outClassDim[] = {12}, outSplitDim[] = {2};
3891 OperationTestBase boxWithNmsLimitTest(
3892 ANEURALNETWORKS_BOX_WITH_NMS_LIMIT,
3893 {getOpType(scoreOperandCode, 2, scoreDim), getOpType(roiOperandCode, 2, roiDim),
3894 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, splitDim), getOpType(scalarOperandCode),
3895 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
3896 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
3897 getOpType(scalarOperandCode)},
3898 {getOpType(scoreOperandCode, 1, outScoreDim), getOpType(roiOperandCode, 2, outRoiDim),
3899 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outClassDim),
3900 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outSplitDim)});
3901 boxWithNmsLimitTest.testOpsValidations();
3902 }
3903
3904 TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float16) {
3905 boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
3906 ANEURALNETWORKS_FLOAT16);
3907 }
3908
3909 TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_float32) {
3910 boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
3911 ANEURALNETWORKS_FLOAT32);
3912 }
3913
3914 TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_quant) {
3915 boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3916 ANEURALNETWORKS_FLOAT32);
3917 }
3918
3919 TEST(OperationValidationTest, BOX_WITH_NMS_LIMIT_quant_signed) {
3920 boxWithNmsLimitOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3921 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
3922 }
3923
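// Validates CAST of a rank-3 tensor from inputOperandCode to outputOperandCode.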
3924 void castOpTest(int32_t inputOperandCode, int32_t outputOperandCode) {
3925 SCOPED_TRACE(testing::Message()
3926 << "inputType: " << inputOperandCode << ", outputType: " << outputOperandCode);
3927 uint32_t inputDimensions[3] = {2, 2, 2};
3928 ANeuralNetworksOperandType input = getOpType(inputOperandCode, 3, inputDimensions);
3929 ANeuralNetworksOperandType output = getOpType(outputOperandCode, 3, inputDimensions);
3930 OperationTestBase test(ANEURALNETWORKS_CAST, {input}, {output});
3931 test.testOpsValidations();
3932 }
3933
3934 TEST(OperationValidationTest, CAST) {
3935 std::vector<int32_t> inputTypes = {ANEURALNETWORKS_TENSOR_FLOAT16,
3936 ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_INT32,
3937 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM};
3938 std::vector<int32_t> outputTypes = inputTypes;
3939 for (auto inputType : inputTypes) {
3940 for (auto outputType : outputTypes) {
3941 castOpTest(inputType, outputType);
3942 }
3943 }
3944 }
3945
3946 TEST(OperationValidationTest, CAST_identity) {
3947 std::vector<int32_t> inputTypes = {
3948 ANEURALNETWORKS_TENSOR_FLOAT32,
3949 ANEURALNETWORKS_TENSOR_INT32,
3950 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3951 ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
3952 ANEURALNETWORKS_TENSOR_FLOAT16,
3953 ANEURALNETWORKS_TENSOR_BOOL8,
3954 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM,
3955 ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
3956 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
3957 };
3958 for (auto inputType : inputTypes) {
3959 castOpTest(inputType, inputType);
3960 }
3961 }
3962
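// Validates BIDIRECTIONAL_SEQUENCE_RNN with forward and backward weight, recurrent-weight, bias,
// and hidden-state operands plus activation, time-major, and merge-outputs scalars.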
3963 void bidirectionlSequenceRNNTest(int32_t inputOperandCode) {
3964 const uint32_t batchSize = 2;
3965 const uint32_t maxTime = 3;
3966 const uint32_t inputSize = 4;
3967 const uint32_t numUnits = 5;
3968
3969 uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
3970 uint32_t weightsDims[2] = {inputSize, numUnits};
3971 uint32_t recurrentWeightsDims[2] = {numUnits, numUnits};
3972 uint32_t biasDims[1] = {numUnits};
3973 uint32_t hiddenStateDims[2] = {batchSize, numUnits};
3974 uint32_t outputDims[2] = {batchSize, numUnits};
3975
3976 ANeuralNetworksOperandType input = {.type = inputOperandCode,
3977 .dimensionCount = 3,
3978 .dimensions = inputDims,
3979 .scale = 0.0f,
3980 .zeroPoint = 0};
3981 ANeuralNetworksOperandType fwWeights = {.type = inputOperandCode,
3982 .dimensionCount = 2,
3983 .dimensions = weightsDims,
3984 .scale = 0.0f,
3985 .zeroPoint = 0};
3986 ANeuralNetworksOperandType bwWeights = fwWeights;
3987 ANeuralNetworksOperandType fwRecurrentWeights = {.type = inputOperandCode,
3988 .dimensionCount = 2,
3989 .dimensions = recurrentWeightsDims,
3990 .scale = 0.0f,
3991 .zeroPoint = 0};
3992 ANeuralNetworksOperandType bwRecurrentWeights = fwRecurrentWeights;
3993 ANeuralNetworksOperandType fwBias = {.type = inputOperandCode,
3994 .dimensionCount = 1,
3995 .dimensions = biasDims,
3996 .scale = 0.0f,
3997 .zeroPoint = 0};
3998 ANeuralNetworksOperandType bwBias = fwBias;
3999 ANeuralNetworksOperandType fwHiddenState = {.type = inputOperandCode,
4000 .dimensionCount = 2,
4001 .dimensions = hiddenStateDims,
4002 .scale = 0.0f,
4003 .zeroPoint = 0};
4004 ANeuralNetworksOperandType bwHiddenState = fwHiddenState;
4005 ANeuralNetworksOperandType output = {.type = inputOperandCode,
4006 .dimensionCount = 2,
4007 .dimensions = outputDims,
4008 .scale = 0.0f,
4009 .zeroPoint = 0};
4010 ANeuralNetworksOperandType activation = {.type = ANEURALNETWORKS_INT32,
4011 .dimensionCount = 0,
4012 .dimensions = nullptr,
4013 .scale = 0.0f,
4014 .zeroPoint = 0};
4015 ANeuralNetworksOperandType boolScalar = {.type = ANEURALNETWORKS_BOOL,
4016 .dimensionCount = 0,
4017 .dimensions = nullptr,
4018 .scale = 0.0f,
4019 .zeroPoint = 0};
4020 ANeuralNetworksOperandType timeMajor = boolScalar;
4021 ANeuralNetworksOperandType mergeOutputs = boolScalar;
4022
4023 OperationTestBase rnnTest(ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_RNN,
4024 {input, fwWeights, fwRecurrentWeights, fwBias, fwHiddenState,
4025 bwWeights, bwRecurrentWeights, bwBias, bwHiddenState, input,
4026 fwWeights, bwWeights, activation, timeMajor, mergeOutputs},
4027 {output, output});
4028 rnnTest.testOpsValidations();
4029 }
4030
4031 TEST(OperationValidationTest, BIDIRECTIONAL_SEQUENCE_RNN_float32) {
4032 bidirectionlSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4033 }
4034
4035 TEST(OperationValidationTest, BIDIRECTIONAL_SEQUENCE_RNN_float16) {
4036 bidirectionlSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4037 }
4038
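// Validates UNIDIRECTIONAL_SEQUENCE_RNN: input, weights, recurrent weights, bias, hidden state,
// and activation/time-major scalars.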
4039 void unidirectionlSequenceRNNTest(int32_t inputOperandCode) {
4040 const uint32_t batchSize = 2;
4041 const uint32_t maxTime = 3;
4042 const uint32_t inputSize = 4;
4043 const uint32_t numUnits = 5;
4044
4045 uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
4046 uint32_t weightsDims[2] = {inputSize, numUnits};
4047 uint32_t recurrentWeightsDims[2] = {numUnits, numUnits};
4048 uint32_t biasDims[1] = {numUnits};
4049 uint32_t hiddenStateDims[2] = {batchSize, numUnits};
4050 uint32_t outputDims[2] = {batchSize, numUnits};
4051
4052 ANeuralNetworksOperandType input = {.type = inputOperandCode,
4053 .dimensionCount = 3,
4054 .dimensions = inputDims,
4055 .scale = 0.0f,
4056 .zeroPoint = 0};
4057 ANeuralNetworksOperandType weights = {.type = inputOperandCode,
4058 .dimensionCount = 2,
4059 .dimensions = weightsDims,
4060 .scale = 0.0f,
4061 .zeroPoint = 0};
4062 ANeuralNetworksOperandType recurrentWeights = {.type = inputOperandCode,
4063 .dimensionCount = 2,
4064 .dimensions = recurrentWeightsDims,
4065 .scale = 0.0f,
4066 .zeroPoint = 0};
4067 ANeuralNetworksOperandType bias = {.type = inputOperandCode,
4068 .dimensionCount = 1,
4069 .dimensions = biasDims,
4070 .scale = 0.0f,
4071 .zeroPoint = 0};
4072 ANeuralNetworksOperandType hiddenState = {.type = inputOperandCode,
4073 .dimensionCount = 2,
4074 .dimensions = hiddenStateDims,
4075 .scale = 0.0f,
4076 .zeroPoint = 0};
4077 ANeuralNetworksOperandType output = {.type = inputOperandCode,
4078 .dimensionCount = 2,
4079 .dimensions = outputDims,
4080 .scale = 0.0f,
4081 .zeroPoint = 0};
4082 ANeuralNetworksOperandType intScalar = {.type = ANEURALNETWORKS_INT32,
4083 .dimensionCount = 0,
4084 .dimensions = nullptr,
4085 .scale = 0.0f,
4086 .zeroPoint = 0};
4087 ANeuralNetworksOperandType activation = intScalar;
4088 ANeuralNetworksOperandType timeMajor = intScalar;
4089
4090 OperationTestBase rnnTest(
4091 ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN,
4092 {input, weights, recurrentWeights, bias, hiddenState, activation, timeMajor}, {output});
4093 rnnTest.testOpsValidations();
4094 }
4095
4096 TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_RNN_float32) {
4097 unidirectionlSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4098 }
4099
4100 TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_RNN_float16) {
4101 unidirectionlSequenceRNNTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4102 }
4103
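// Validates UNIDIRECTIONAL_SEQUENCE_LSTM with the full set of gate, recurrent, peephole,
// projection, and layer-normalization operands.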
4104 void unidirectionalSequenceLSTMTest(int32_t inputOperandCode) {
4105 const uint32_t maxTime = 2;
4106 const uint32_t batchSize = 3;
4107 const uint32_t numUnits = 4;
4108 const uint32_t inputSize = 5;
4109 const uint32_t outputSize = 6;
4110
4111 uint32_t inputDims[3] = {maxTime, batchSize, inputSize};
4112 uint32_t inputWeightsDims[2] = {numUnits, inputSize};
4113 uint32_t recurrentWeightsDims[2] = {numUnits, outputSize};
4114 uint32_t diagonalDims[1] = {numUnits};
4115 uint32_t projectionDims[2] = {outputSize, numUnits};
4116 uint32_t projectionBiasDims[1] = {outputSize};
4117 uint32_t outputStateDims[2] = {batchSize, outputSize};
4118 uint32_t cellStateDims[2] = {batchSize, numUnits};
4119
4120 uint32_t outputDims[3] = {maxTime, batchSize, outputSize};
4121
4122 ANeuralNetworksOperandType input = {.type = inputOperandCode,
4123 .dimensionCount = 3,
4124 .dimensions = inputDims,
4125 .scale = 0.0f,
4126 .zeroPoint = 0};
4127 ANeuralNetworksOperandType inputToInputWeights = {.type = inputOperandCode,
4128 .dimensionCount = 2,
4129 .dimensions = inputWeightsDims,
4130 .scale = 0.0f,
4131 .zeroPoint = 0};
4132 ANeuralNetworksOperandType inputToForgetWeights = inputToInputWeights;
4133 ANeuralNetworksOperandType inputToCellWeights = inputToInputWeights;
4134 ANeuralNetworksOperandType inputToOutputWeights = inputToInputWeights;
4135 ANeuralNetworksOperandType recurrentToInputWeights = {.type = inputOperandCode,
4136 .dimensionCount = 2,
4137 .dimensions = recurrentWeightsDims,
4138 .scale = 0.0f,
4139 .zeroPoint = 0};
4140 ANeuralNetworksOperandType recurrentToForgetWeights = recurrentToInputWeights;
4141 ANeuralNetworksOperandType recurrentToCellWeights = recurrentToInputWeights;
4142 ANeuralNetworksOperandType recurrentToOutputWeights = recurrentToInputWeights;
4143 ANeuralNetworksOperandType cellToInputWeights = {.type = inputOperandCode,
4144 .dimensionCount = 1,
4145 .dimensions = diagonalDims,
4146 .scale = 0.0f,
4147 .zeroPoint = 0};
4148 ANeuralNetworksOperandType cellToForgetWeights = cellToInputWeights;
4149 ANeuralNetworksOperandType cellToOutputWeights = cellToInputWeights;
4150 ANeuralNetworksOperandType inputGateBias = {.type = inputOperandCode,
4151 .dimensionCount = 1,
4152 .dimensions = diagonalDims,
4153 .scale = 0.0f,
4154 .zeroPoint = 0};
4155 ANeuralNetworksOperandType forgetGateBias = inputGateBias;
4156 ANeuralNetworksOperandType cellGateBias = inputGateBias;
4157 ANeuralNetworksOperandType outputGateBias = inputGateBias;
4158 ANeuralNetworksOperandType projectionWeights = {.type = inputOperandCode,
4159 .dimensionCount = 2,
4160 .dimensions = projectionDims,
4161 .scale = 0.0f,
4162 .zeroPoint = 0};
4163 ANeuralNetworksOperandType projectionBias = {.type = inputOperandCode,
4164 .dimensionCount = 1,
4165 .dimensions = projectionBiasDims,
4166 .scale = 0.0f,
4167 .zeroPoint = 0};
4168 ANeuralNetworksOperandType outputStateIn = {.type = inputOperandCode,
4169 .dimensionCount = 2,
4170 .dimensions = outputStateDims,
4171 .scale = 0.0f,
4172 .zeroPoint = 0};
4173 ANeuralNetworksOperandType cellStateIn = {.type = inputOperandCode,
4174 .dimensionCount = 2,
4175 .dimensions = cellStateDims,
4176 .scale = 0.0f,
4177 .zeroPoint = 0};
4178 ANeuralNetworksOperandType intScalar = {
4179 .type = ANEURALNETWORKS_INT32,
4180 .dimensionCount = 0,
4181 .dimensions = nullptr,
4182 .scale = 0.0f,
4183 .zeroPoint = 0,
4184 };
4185 ANeuralNetworksOperandType activation = intScalar;
4186 ANeuralNetworksOperandType floatScalar = {
4187 .type = inputOperandCode == ANEURALNETWORKS_TENSOR_FLOAT32 ? ANEURALNETWORKS_FLOAT32
4188 : ANEURALNETWORKS_FLOAT16,
4189 .dimensionCount = 0,
4190 .dimensions = nullptr,
4191 .scale = 0.0f,
4192 .zeroPoint = 0,
4193 };
4194 ANeuralNetworksOperandType cellClip = floatScalar;
4195 ANeuralNetworksOperandType projClip = floatScalar;
4196 ANeuralNetworksOperandType boolScalar = {
4197 .type = ANEURALNETWORKS_BOOL,
4198 .dimensionCount = 0,
4199 .dimensions = nullptr,
4200 .scale = 0.0f,
4201 .zeroPoint = 0,
4202 };
4203 ANeuralNetworksOperandType timeMajor = boolScalar;
4204 ANeuralNetworksOperandType inputLayerNormWeights = {.type = inputOperandCode,
4205 .dimensionCount = 1,
4206 .dimensions = diagonalDims,
4207 .scale = 0.0f,
4208 .zeroPoint = 0};
4209 ANeuralNetworksOperandType forgetLayerNormWeights = inputLayerNormWeights;
4210 ANeuralNetworksOperandType cellLayerNormWeights = inputLayerNormWeights;
4211 ANeuralNetworksOperandType outputLayerNormWeights = inputLayerNormWeights;
4212
4213 ANeuralNetworksOperandType output = {.type = inputOperandCode,
4214 .dimensionCount = 3,
4215 .dimensions = outputDims,
4216 .scale = 0.0f,
4217 .zeroPoint = 0};
4218
4219 OperationTestBase ulstmTest(ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM,
4220 {input,
4221 inputToInputWeights,
4222 inputToForgetWeights,
4223 inputToCellWeights,
4224 inputToOutputWeights,
4225 recurrentToInputWeights,
4226 recurrentToForgetWeights,
4227 recurrentToCellWeights,
4228 recurrentToOutputWeights,
4229 cellToInputWeights,
4230 cellToForgetWeights,
4231 cellToOutputWeights,
4232 inputGateBias,
4233 forgetGateBias,
4234 cellGateBias,
4235 outputGateBias,
4236 projectionWeights,
4237 projectionBias,
4238 outputStateIn,
4239 cellStateIn,
4240 activation,
4241 cellClip,
4242 projClip,
4243 timeMajor,
4244 inputLayerNormWeights,
4245 forgetLayerNormWeights,
4246 cellLayerNormWeights,
4247 outputLayerNormWeights},
4248 {output});
4249 ulstmTest.testOpsValidations();
4250 }
4251
4252 TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_LSTM_float32) {
4253 unidirectionalSequenceLSTMTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4254 }
4255
4256 TEST(OperationValidationTest, UNIDIRECTIONAL_SEQUENCE_LSTM_float16) {
4257 unidirectionalSequenceLSTMTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4258 }
4259
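// Validates GENERATE_PROPOSALS: score, bounding-box delta, anchor, and image-info tensors plus
// scalar parameters, producing scores, ROIs, and a batch-split tensor.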
4260 void generateProposalsOpTest(int32_t scoreOperandCode, int32_t deltaOperandCode,
4261 int32_t anchorOperandCode, int32_t roiOperandCode,
4262 int32_t scalarOperandCode) {
4263 uint32_t scoreDim[] = {1, 2, 2, 2}, deltaDim[] = {1, 2, 2, 8}, anchorDim[] = {2, 4},
4264 imageInfoDim[] = {1, 2};
4265 uint32_t outScoreDim[] = {4}, outRoiDim[] = {4, 4}, outSplitDim[] = {1};
4266 OperationTestBase generateProposalsTest(
4267 ANEURALNETWORKS_GENERATE_PROPOSALS,
4268 {getOpType(scoreOperandCode, 4, scoreDim), getOpType(deltaOperandCode, 4, deltaDim),
4269 getOpType(anchorOperandCode, 2, anchorDim), getOpType(roiOperandCode, 2, imageInfoDim),
4270 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
4271 getOpType(ANEURALNETWORKS_INT32), getOpType(ANEURALNETWORKS_INT32),
4272 getOpType(scalarOperandCode), getOpType(scalarOperandCode),
4273 getOpType(ANEURALNETWORKS_BOOL)},
4274 {getOpType(scoreOperandCode, 1, outScoreDim), getOpType(roiOperandCode, 2, outRoiDim),
4275 getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, outSplitDim)});
4276 generateProposalsTest.testOpsValidations();
4277 }
4278
4279 TEST(OperationValidationTest, GENERATE_PROPOSALS_float16) {
4280 generateProposalsOpTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
4281 ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16,
4282 ANEURALNETWORKS_FLOAT16);
4283 }
4284
4285 TEST(OperationValidationTest, GENERATE_PROPOSALS_float32) {
4286 generateProposalsOpTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
4287 ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32,
4288 ANEURALNETWORKS_FLOAT32);
4289 }
4290
4291 TEST(OperationValidationTest, GENERATE_PROPOSALS_quant) {
4292 generateProposalsOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
4293 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
4294 ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
4295 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
4296 }
4297
4298 TEST(OperationValidationTest, GENERATE_PROPOSALS_quant_signed) {
4299 generateProposalsOpTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
4300 ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
4301 ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
4302 ANEURALNETWORKS_TENSOR_QUANT16_ASYMM, ANEURALNETWORKS_FLOAT32);
4303 }
4304
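// Validates RESIZE_NEAREST_NEIGHBOR: an input tensor, two output-size (or scale) scalars, and a
// BOOL layout scalar.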
4305 void resizeNearestNeighborTest(int32_t inputCode, int32_t scalarCode) {
4306 uint32_t inputDim[] = {1, 2, 2, 1}, outputDim[] = {1, 1, 1, 1};
4307 OperationTestBase resizeImageOpTest(ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR,
4308 {getOpType(inputCode, 4, inputDim), getOpType(scalarCode),
4309 getOpType(scalarCode), getOpType(ANEURALNETWORKS_BOOL)},
4310 {getOpType(inputCode, 4, outputDim)});
4311 resizeImageOpTest.testOpsValidations();
4312 }
4313
4314 TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR) {
4315 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_INT32);
4316 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT32, ANEURALNETWORKS_FLOAT32);
4317 }
4318
4319 TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_float16) {
4320 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_INT32);
4321 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_FLOAT16, ANEURALNETWORKS_FLOAT16);
4322 }
4323
4324 TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_quant8) {
4325 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_INT32);
4326 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, ANEURALNETWORKS_FLOAT32);
4327 }
4328
4329 TEST(OperationValidationTest, RESIZE_NEAREST_NEIGHBOR_quant8_signed) {
4330 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_INT32);
4331 resizeNearestNeighborTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, ANEURALNETWORKS_FLOAT32);
4332 }
4333
4334 TEST(OperationValidationTest, QUANTIZED_LSTM) {
4335 uint32_t oneDimensional[1] = {5};
4336 uint32_t twoDimensional[2] = {5, 5};
4337
4338 ANeuralNetworksOperandType quant8AsymSignedTensor2D = {
4339 .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
4340 .dimensionCount = 2,
4341 .dimensions = twoDimensional,
4342 .scale = 0.0078125,
4343 .zeroPoint = 0,
4344 };
4345 ANeuralNetworksOperandType quant8SymTensor2D = {
4346 .type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
4347 .dimensionCount = 2,
4348 .dimensions = twoDimensional,
4349 .scale = 0.0078125,
4350 .zeroPoint = 0,
4351 };
4352 ANeuralNetworksOperandType quant16SymTensor1D = {
4353 .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
4354 .dimensionCount = 1,
4355 .dimensions = oneDimensional,
4356 .scale = 1.0,
4357 .zeroPoint = 0,
4358 };
4359 ANeuralNetworksOperandType quant16SymTensor2D = {
4360 .type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
4361 .dimensionCount = 2,
4362 .dimensions = twoDimensional,
4363 .scale = 1.0,
4364 .zeroPoint = 0,
4365 };
4366 ANeuralNetworksOperandType int32Tensor1D = {
4367 .type = ANEURALNETWORKS_TENSOR_INT32,
4368 .dimensionCount = 1,
4369 .dimensions = oneDimensional,
4370 .scale = 4.65661e-08,
4371 .zeroPoint = 0,
4372 };
4373 ANeuralNetworksOperandType int32Scalar = {
4374 .type = ANEURALNETWORKS_INT32,
4375 };
4376 ANeuralNetworksOperandType float32Scalar = {
4377 .type = ANEURALNETWORKS_FLOAT32,
4378 };
4379
4380 ANeuralNetworksOperandType input = quant8AsymSignedTensor2D;
4381 ANeuralNetworksOperandType input_to_input_weights = quant8SymTensor2D;
4382 ANeuralNetworksOperandType input_to_forget_weights = quant8SymTensor2D;
4383 ANeuralNetworksOperandType input_to_cell_weights = quant8SymTensor2D;
4384 ANeuralNetworksOperandType input_to_output_weights = quant8SymTensor2D;
4385 ANeuralNetworksOperandType recurrent_to_input_weights = quant8SymTensor2D;
4386 ANeuralNetworksOperandType recurrent_to_forget_weights = quant8SymTensor2D;
4387 ANeuralNetworksOperandType recurrent_to_cell_weights = quant8SymTensor2D;
4388 ANeuralNetworksOperandType recurrent_to_output_weights = quant8SymTensor2D;
4389 ANeuralNetworksOperandType cell_to_input_weights = quant16SymTensor2D;
4390 ANeuralNetworksOperandType cell_to_forget_weights = quant16SymTensor2D;
4391 ANeuralNetworksOperandType cell_to_output_weights = quant16SymTensor2D;
4392 ANeuralNetworksOperandType input_gate_bias = int32Tensor1D;
4393 ANeuralNetworksOperandType forget_gate_bias = int32Tensor1D;
4394 ANeuralNetworksOperandType cell_gate_bias = int32Tensor1D;
4395 ANeuralNetworksOperandType output_gate_bias = int32Tensor1D;
4396 ANeuralNetworksOperandType projection_weights = quant8SymTensor2D;
4397 ANeuralNetworksOperandType projection_bias = int32Tensor1D;
4398 ANeuralNetworksOperandType output_state_in = quant8AsymSignedTensor2D;
4399 ANeuralNetworksOperandType cell_state_in = quant16SymTensor2D;
4400 ANeuralNetworksOperandType input_layer_norm_weights = quant16SymTensor1D;
4401 ANeuralNetworksOperandType forget_layer_norm_weights = quant16SymTensor1D;
4402 ANeuralNetworksOperandType cell_layer_norm_weights = quant16SymTensor1D;
4403 ANeuralNetworksOperandType output_layer_norm_weights = quant16SymTensor1D;
4404 ANeuralNetworksOperandType cell_clip = float32Scalar;
4405 ANeuralNetworksOperandType projection_clip = float32Scalar;
4406 ANeuralNetworksOperandType input_intermediate_scale = float32Scalar;
4407 ANeuralNetworksOperandType forget_intermediate_scale = float32Scalar;
4408 ANeuralNetworksOperandType cell_intermediate_scale = float32Scalar;
4409 ANeuralNetworksOperandType output_intermediate_scale = float32Scalar;
4410 ANeuralNetworksOperandType hidden_state_zero_point = int32Scalar;
4411 ANeuralNetworksOperandType hidden_state_scale = float32Scalar;
4412
4413 ANeuralNetworksOperandType output_state_out = quant8AsymSignedTensor2D;
4414 ANeuralNetworksOperandType cell_state_out = quant16SymTensor2D;
4415 ANeuralNetworksOperandType output = quant8AsymSignedTensor2D;
4416
4417 OperationTestBase test(ANEURALNETWORKS_QUANTIZED_LSTM,
4418 {input,
4419 input_to_input_weights,
4420 input_to_forget_weights,
4421 input_to_cell_weights,
4422 input_to_output_weights,
4423 recurrent_to_input_weights,
4424 recurrent_to_forget_weights,
4425 recurrent_to_cell_weights,
4426 recurrent_to_output_weights,
4427 cell_to_input_weights,
4428 cell_to_forget_weights,
4429 cell_to_output_weights,
4430 input_gate_bias,
4431 forget_gate_bias,
4432 cell_gate_bias,
4433 output_gate_bias,
4434 projection_weights,
4435 projection_bias,
4436 output_state_in,
4437 cell_state_in,
4438 input_layer_norm_weights,
4439 forget_layer_norm_weights,
4440 cell_layer_norm_weights,
4441 output_layer_norm_weights,
4442 cell_clip,
4443 projection_clip,
4444 input_intermediate_scale,
4445 forget_intermediate_scale,
4446 cell_intermediate_scale,
4447 output_intermediate_scale,
4448 hidden_state_zero_point,
4449 hidden_state_scale},
4450 {output_state_out, cell_state_out, output});
4451 test.testOpsValidations();
4452 }
4453
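// Validates FILL: a 1-D TENSOR_INT32 shape tensor and a scalar fill value, producing a tensor of
// the given output type.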
4454 void fillTest(int32_t valueOperandType, int32_t outputOperandType) {
4455 uint32_t inputDimensions[1] = {3};
4456 ANeuralNetworksOperandType input0 = getOpType(ANEURALNETWORKS_TENSOR_INT32, 1, inputDimensions);
4457 ANeuralNetworksOperandType input1 = getOpType(valueOperandType);
4458 uint32_t outputDimensions[3] = {3, 4, 5};
4459 ANeuralNetworksOperandType output = getOpType(outputOperandType, 3, outputDimensions);
4460 OperationTestBase test(ANEURALNETWORKS_FILL, {input0, input1}, {output});
4461 test.testOpsValidations();
4462 }
4463
4464 TEST(OperationValidationTest, FILL_float16) {
4465 fillTest(ANEURALNETWORKS_FLOAT16, ANEURALNETWORKS_TENSOR_FLOAT16);
4466 }
4467
4468 TEST(OperationValidationTest, FILL_float32) {
4469 fillTest(ANEURALNETWORKS_FLOAT32, ANEURALNETWORKS_TENSOR_FLOAT32);
4470 }
4471
4472 TEST(OperationValidationTest, FILL_int32) {
4473 fillTest(ANEURALNETWORKS_INT32, ANEURALNETWORKS_TENSOR_INT32);
4474 }
4475
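// Validates RANK: a tensor of the given type in, an INT32 scalar out.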
4476 void rankTest(int32_t inputOperandType) {
4477 uint32_t inputDimensions[3] = {3, 4, 5};
4478 ANeuralNetworksOperandType input = getOpType(inputOperandType, 3, inputDimensions);
4479 ANeuralNetworksOperandType output = getOpType(ANEURALNETWORKS_INT32);
4480 OperationTestBase test(ANEURALNETWORKS_RANK, {input}, {output});
4481 test.testOpsValidations();
4482 }
4483
4484 TEST(OperationValidationTest, RANK_float16) {
4485 rankTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4486 }
4487
4488 TEST(OperationValidationTest, RANK_float32) {
4489 rankTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4490 }
4491
4492 TEST(OperationValidationTest, RANK_int32) {
4493 rankTest(ANEURALNETWORKS_TENSOR_INT32);
4494 }
4495
4496 TEST(OperationValidationTest, RANK_quant8) {
4497 rankTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
4498 }
4499
4500 TEST(OperationValidationTest, RANK_quant8_signed) {
4501 rankTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
4502 }
4503
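// Validates BATCH_MATMUL: rank-3 LHS and RHS tensors plus two BOOL transposition flags.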
4504 void batchMatmulTest(int32_t operandType) {
4505 uint32_t inputLHSDimensions[3] = {1, 2, 3};
4506 ANeuralNetworksOperandType input0 = getOpType(operandType, 3, inputLHSDimensions);
4507 uint32_t inputRHSDimensions[3] = {1, 3, 4};
4508 ANeuralNetworksOperandType input1 = getOpType(operandType, 3, inputRHSDimensions);
4509 ANeuralNetworksOperandType input2 = getOpType(ANEURALNETWORKS_BOOL);
4510 ANeuralNetworksOperandType input3 = getOpType(ANEURALNETWORKS_BOOL);
4511 uint32_t outputDimensions[3] = {1, 2, 4};
4512 ANeuralNetworksOperandType output = getOpType(operandType, 3, outputDimensions);
4513 OperationTestBase test(ANEURALNETWORKS_BATCH_MATMUL, {input0, input1, input2, input3},
4514 {output});
4515 test.testOpsValidations();
4516 }
4517
4518 TEST(OperationValidationTest, BATCH_MATMUL_float16) {
4519 batchMatmulTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4520 }
4521
4522 TEST(OperationValidationTest, BATCH_MATMUL_float32) {
4523 batchMatmulTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4524 }
4525
4526 TEST(OperationValidationTest, BATCH_MATMUL_int32) {
4527 batchMatmulTest(ANEURALNETWORKS_TENSOR_INT32);
4528 }
4529
4530 TEST(OperationValidationTest, BATCH_MATMUL_quant8_signed) {
4531 batchMatmulTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
4532 }
4533
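// Builds a minimal model whose single CAST operation copies its input operand to its output;
// used as the then/else branch in the IF validation tests below.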
4534 ANeuralNetworksModel* makeIdentityModel(const ANeuralNetworksOperandType* type) {
4535 ANeuralNetworksModel* model = nullptr;
4536 EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
4537 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
4538 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
4539 uint32_t inputs[] = {0};
4540 uint32_t outputs[] = {1};
4541 EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(inputs),
4542 inputs, std::size(outputs), outputs),
4543 ANEURALNETWORKS_NO_ERROR);
4544 EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
4545 std::size(outputs), outputs),
4546 ANEURALNETWORKS_NO_ERROR);
4547 EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
4548 return model;
4549 }
4550
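// Validates IF with the given outer operand dimensions and the supplied then/else branch models;
// mutation testing is optional so the caller can keep the runtime bounded.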
4551 void testIf(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* thenModel,
4552 const ANeuralNetworksModel* elseModel, bool testMutations) {
4553 const uint32_t kThenOperand = 1;
4554 const uint32_t kElseOperand = 2;
4555 const uint32_t boolDims[] = {1};
4556 ANeuralNetworksOperandType boolType =
4557 getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
4558 ANeuralNetworksOperandType dataType =
4559 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
4560 ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
4561 OperationTestBase test(ANEURALNETWORKS_IF, {boolType, modelType, modelType, dataType},
4562 {dataType});
4563 test.setInputOperandValueFromModel(kThenOperand, thenModel);
4564 test.setInputOperandValueFromModel(kElseOperand, elseModel);
4565 if (testMutations) {
4566 test.testOpsValidations();
4567 } else {
4568 EXPECT_TRUE(test.testSuccess());
4569 }
4570 }
4571
4572 void testIf(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& thenDims,
4573 const std::vector<uint32_t>& elseDims, bool testMutations) {
4574 ANeuralNetworksOperandType thenDataType =
4575 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, thenDims.size(), thenDims.data());
4576 ANeuralNetworksOperandType elseDataType =
4577 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, elseDims.size(), elseDims.data());
4578 ANeuralNetworksModel* thenModel = makeIdentityModel(&thenDataType);
4579 ANeuralNetworksModel* elseModel = makeIdentityModel(&elseDataType);
4580 testIf(outerDims, thenModel, elseModel, testMutations);
4581 ANeuralNetworksModel_free(thenModel);
4582 ANeuralNetworksModel_free(elseModel);
4583 }
4584
4585 TEST(OperationValidationTest, IF) {
4586 const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
4587 {"fully specified", {1, 2, 3}},
4588 {"unknown dimensions", {0, 2, 0}},
4589 {"unknown rank", {}},
4590 };
4591 // We skip mutation testing for all but the first configuration to avoid the
4592 // exponential runtime blowup. The value of additional operand code and
4593 // count mutations is negligible because whether the shapes are fully
4594 // specified should have nothing to do with the operand code or count.
4595 bool testMutations = true;
4596 for (const auto& [outerTrace, outerDims] : configurations) {
4597 SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
4598 for (const auto& [thenTrace, thenDims] : configurations) {
4599 SCOPED_TRACE(testing::Message() << "thenDims: " << thenTrace);
4600 for (const auto& [elseTrace, elseDims] : configurations) {
4601 SCOPED_TRACE(testing::Message() << "elseDims: " << elseTrace);
4602 testIf(outerDims, thenDims, elseDims, testMutations);
4603 testMutations = false;
4604 }
4605 }
4606 }
4607 }
4608
4609 // operand 0 --> +------+
4610 // | LESS | --> operand 2
4611 // operand 1 --> +------+
4612 //
4613 ANeuralNetworksModel* makeWhileCondModel(const ANeuralNetworksOperandType* dataType,
4614 const ANeuralNetworksOperandType* boolType) {
4615 ANeuralNetworksModel* model = nullptr;
4616 EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
4617 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
4618 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, dataType), ANEURALNETWORKS_NO_ERROR);
4619 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, boolType), ANEURALNETWORKS_NO_ERROR);
4620 const uint32_t inputs[] = {0, 1};
4621 const uint32_t outputs[] = {2};
4622 EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_LESS, std::size(inputs),
4623 inputs, std::size(outputs), outputs),
4624 ANEURALNETWORKS_NO_ERROR);
4625 EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inputs), inputs,
4626 std::size(outputs), outputs),
4627 ANEURALNETWORKS_NO_ERROR);
4628 EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
4629 return model;
4630 }
4631
4632 // +------+
4633 // operand 0 --> | CAST | --> operand 2
4634 // +------+
4635 //
4636 // operand 1 --> (unused)
4637 //
4638 ANeuralNetworksModel* makeWhileBodyModel(const ANeuralNetworksOperandType* type) {
4639 ANeuralNetworksModel* model = nullptr;
4640 EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
4641 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
4642 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
4643 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, type), ANEURALNETWORKS_NO_ERROR);
4644 const uint32_t castInputs[] = {0};
4645 const uint32_t castOutputs[] = {2};
4646 EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CAST, std::size(castInputs),
4647 castInputs, std::size(castOutputs), castOutputs),
4648 ANEURALNETWORKS_NO_ERROR);
4649 const uint32_t modelInputs[] = {0, 1};
4650 const uint32_t modelOutputs[] = {2};
4651 EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(modelInputs),
4652 modelInputs, std::size(modelOutputs),
4653 modelOutputs),
4654 ANEURALNETWORKS_NO_ERROR);
4655 EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
4656 return model;
4657 }
4658
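// Validates WHILE with the given outer operand dimensions and the supplied condition and body
// models; mutation testing is optional so the caller can keep the runtime bounded.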
4659 void testWhile(const std::vector<uint32_t>& outerDims, const ANeuralNetworksModel* condModel,
4660 const ANeuralNetworksModel* bodyModel, bool testMutations) {
4661 const uint32_t kCondOperand = 0;
4662 const uint32_t kBodyOperand = 1;
4663 ANeuralNetworksOperandType modelType = getOpType(ANEURALNETWORKS_MODEL);
4664 ANeuralNetworksOperandType dataType =
4665 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, outerDims.size(), outerDims.data());
4666 OperationTestBase test(ANEURALNETWORKS_WHILE, {modelType, modelType, dataType, dataType},
4667 {dataType});
4668 test.setInputOperandValueFromModel(kCondOperand, condModel);
4669 test.setInputOperandValueFromModel(kBodyOperand, bodyModel);
4670 if (testMutations) {
4671 test.testOpsValidations();
4672 } else {
4673 EXPECT_TRUE(test.testSuccess());
4674 }
4675 }
4676
4677 void testWhile(const std::vector<uint32_t>& outerDims, const std::vector<uint32_t>& condDims,
4678 const std::vector<uint32_t>& bodyDims, bool testMutations) {
4679 const uint32_t boolDims[] = {1};
4680 ANeuralNetworksOperandType boolType =
4681 getOpType(ANEURALNETWORKS_TENSOR_BOOL8, std::size(boolDims), boolDims);
4682 ANeuralNetworksOperandType condDataType =
4683 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, condDims.size(), condDims.data());
4684 ANeuralNetworksOperandType bodyDataType =
4685 getOpType(ANEURALNETWORKS_TENSOR_FLOAT32, bodyDims.size(), bodyDims.data());
4686 ANeuralNetworksModel* condModel = makeWhileCondModel(&condDataType, &boolType);
4687 ANeuralNetworksModel* bodyModel = makeWhileBodyModel(&bodyDataType);
4688 testWhile(outerDims, condModel, bodyModel, testMutations);
4689 ANeuralNetworksModel_free(condModel);
4690 ANeuralNetworksModel_free(bodyModel);
4691 }
4692
4693 TEST(OperationValidationTest, WHILE) {
4694 const std::vector<std::pair<std::string, std::vector<uint32_t>>> configurations = {
4695 {"fully specified", {1, 2, 3}},
4696 {"unknown dimensions", {0, 2, 0}},
4697 {"unknown rank", {}},
4698 };
4699 // We skip mutation testing for all but the first configuration to avoid the
4700 // exponential runtime blowup. The value of additional operand code and
4701 // count mutations is negligible because whether the shapes are fully
4702 // specified should have nothing to do with the operand code or count.
4703 bool testMutations = true;
4704 for (const auto& [outerTrace, outerDims] : configurations) {
4705 SCOPED_TRACE(testing::Message() << "outerDims: " << outerTrace);
4706 for (const auto& [condTrace, condDims] : configurations) {
4707 SCOPED_TRACE(testing::Message() << "condDims: " << condTrace);
4708 for (const auto& [bodyTrace, bodyDims] : configurations) {
4709 SCOPED_TRACE(testing::Message() << "bodyDims: " << bodyTrace);
4710 testWhile(outerDims, condDims, bodyDims, testMutations);
4711 testMutations = false;
4712 }
4713 }
4714 }
4715 }
4716
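// Scalar INT32 axis operand shared by the PACK validation helpers below.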
4717 constexpr ANeuralNetworksOperandType packAxisType = {.type = ANEURALNETWORKS_INT32,
4718 .dimensionCount = 0,
4719 .dimensions = nullptr,
4720 .scale = 0.0f,
4721 .zeroPoint = 0};
4722
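// Validates PACK for every legal axis and for one or two input tensors, deriving the expected
// output shape from the input shape and tensor count.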
4723 void packTest(int32_t operandCode) {
4724 const uint32_t inputDimensions[3] = {4, 5, 6};
4725 constexpr size_t inputRank = sizeof(inputDimensions) / sizeof(inputDimensions[0]);
4726 const ANeuralNetworksOperandType inputTensorType =
4727 getOpType(operandCode, inputRank, inputDimensions);
4728
4729 constexpr uint32_t outputRank = inputRank + 1;
4730
4731 for (uint32_t axis = 0; axis < outputRank; ++axis) {
4732 SCOPED_TRACE(axis);
4733 for (uint32_t inputTensorCount : {1, 2}) {
4734 SCOPED_TRACE(inputTensorCount);
4735 uint32_t outputDimensions[outputRank];
4736 for (uint32_t inDim = 0, outDim = 0; outDim < outputRank; ++outDim) {
4737 if (outDim == axis) {
4738 outputDimensions[outDim] = inputTensorCount;
4739 } else {
4740 outputDimensions[outDim] = inputDimensions[inDim++];
4741 }
4742 }
4743 const ANeuralNetworksOperandType outputTensorType =
4744 getOpType(operandCode, outputRank, outputDimensions);
4745
4746 std::vector<ANeuralNetworksOperandType> validInputs = {packAxisType};
4747 validInputs.insert(validInputs.end(), inputTensorCount, inputTensorType);
4748
4749 OperationTestBase packTest(ANEURALNETWORKS_PACK, validInputs, {outputTensorType});
4750 packTest.testOpsValidations();
4751 }
4752 }
4753 }
4754
4755 TEST(OperationValidationTest, PACK_float16) {
4756 packTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4757 }
4758 TEST(OperationValidationTest, PACK_float32) {
4759 packTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4760 }
4761
4762 TEST(OperationValidationTest, PACK_quant8) {
4763 packTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
4764 }
4765
4766 TEST(OperationValidationTest, PACK_quant8_signed) {
4767 packTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
4768 }
4769
4770 TEST(OperationValidationTest, PACK_int32) {
4771 packTest(ANEURALNETWORKS_TENSOR_INT32);
4772 }
4773
4774 // Test quantization parameters that are inconsistent among operands.
4775 void packTestBadQuantization(int32_t operandCode, BadQuantization bad) {
4776 constexpr uint32_t inputTensorCount = 2;
4777 const uint32_t inputDimensions[3] = {4, 5, 6};
4778 constexpr size_t inputRank = sizeof(inputDimensions) / sizeof(inputDimensions[0]);
4779 const ANeuralNetworksOperandType inputTensorType =
4780 getOpType(operandCode, inputRank, inputDimensions);
4781
4782 // Behave as if the axis equals inputRank.
4783 constexpr uint32_t outputRank = inputRank + 1;
4784 uint32_t outputDimensions[outputRank];
4785 std::copy(std::begin(inputDimensions), std::end(inputDimensions), std::begin(outputDimensions));
4786 outputDimensions[inputRank] = inputTensorCount;
4787
4788 // The "deviant" is the operand whose quantization parameters are to be made
4789 // inconsistent with those of the other operands.
4790 // inputTensorCount = Change the output tensor.
4791 // [0, inputTensorCount) = Change the corresponding input tensor.
4792 for (uint32_t deviant = 0; deviant <= inputTensorCount; ++deviant) {
4793 SCOPED_TRACE(deviant);
4794 std::vector<ANeuralNetworksOperandType> inputTypes = {packAxisType};
4795 inputTypes.insert(inputTypes.end(), inputTensorCount, inputTensorType);
4796 ANeuralNetworksOperandType outputType =
4797 getOpType(operandCode, outputRank, outputDimensions);
4798 if (deviant == inputTensorCount) {
4799 scramble(&outputType, bad);
4800 } else {
4801 scramble(&inputTypes[1 + deviant], bad);
4802 }
4803 OperationTestBase packTest(ANEURALNETWORKS_PACK, inputTypes, {outputType});
4804 if (bad == BadQuantization::NONE) {
4805 packTest.testSuccess();
4806 return;
4807 } else {
4808 packTest.testFailure(ANEURALNETWORKS_BAD_DATA);
4809 }
4810 }
4811 }
4812
4813 TEST(OperationValidationTest, PACK_quant8_bad_none) {
4814 // Make sure packTestBadQuantization starts with a valid operation and only corrupts what it
4815 // intends to.
4816 packTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::NONE);
4817 }
4818
4819 TEST(OperationValidationTest, PACK_quant8_bad_zeroPoint) {
4820 packTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::zeroPoint);
4821 }
4822
4823 TEST(OperationValidationTest, PACK_quant8_bad_scale) {
4824 packTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::scale);
4825 }
4826
4827 TEST(OperationValidationTest, PACK_quant8_signed_bad_zeroPoint) {
4828 packTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, BadQuantization::zeroPoint);
4829 }
4830
4831 TEST(OperationValidationTest, PACK_quant8_signed_bad_scale) {
4832 packTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, BadQuantization::scale);
4833 }
4834
4835 // Test ranks that are inconsistent among operands.
4836 void packTestBadRank(uint32_t operandCode, int adjustRank) {
4837 constexpr uint32_t inputTensorCount = 2;
4838 const uint32_t inputDimensions[3] = {4, 5, 6};
4839 constexpr size_t inputRank = sizeof(inputDimensions) / sizeof(inputDimensions[0]);
4840 const ANeuralNetworksOperandType inputTensorType =
4841 getOpType(operandCode, inputRank, inputDimensions);
4842
4843 // Behave as if the axis equals 0.
4844 constexpr uint32_t outputRank = inputRank + 1;
4845 uint32_t outputDimensions[outputRank];
4846 std::copy(std::begin(inputDimensions), std::end(inputDimensions),
4847 std::begin(outputDimensions) + 1);
4848 outputDimensions[0] = inputTensorCount;
4849
4850 // The "deviant" is the operand whose rank is to be made inconsistent with
4851 // those of other operands.
4852 // inputTensorCount = Change the output tensor.
4853 // [0, inputTensorCount) = Change the corresponding input tensor.
4854 for (uint32_t deviant = 0; deviant <= inputTensorCount; ++deviant) {
4855 SCOPED_TRACE(deviant);
4856
4857 std::vector<uint32_t> scrambledDimensions;
4858 auto scramble = [adjustRank, &scrambledDimensions](ANeuralNetworksOperandType* type) {
4859 if (!adjustRank) {
4860 return;
4861 }
4862 if (adjustRank < 0) {
4863 ASSERT_GT(type->dimensionCount, uint32_t(-adjustRank));
4864 type->dimensionCount += adjustRank;
4865 return;
4866 }
4867 const uint32_t oldRank = type->dimensionCount;
4868 const uint32_t newRank = oldRank + adjustRank;
4869 ASSERT_EQ(scrambledDimensions.size(), size_t(0)); // only use this vector once
4870 scrambledDimensions.resize(newRank);
4871 std::copy(&type->dimensions[0], &type->dimensions[oldRank], &scrambledDimensions[0]);
4872 std::fill(&scrambledDimensions[oldRank], &scrambledDimensions[newRank],
4873 /* arbitrary choice */ 7);
4874 type->dimensionCount = newRank;
4875 type->dimensions = &scrambledDimensions[0];
4876 };
4877
4878 std::vector<ANeuralNetworksOperandType> inputTypes = {packAxisType};
4879 inputTypes.insert(inputTypes.end(), inputTensorCount, inputTensorType);
4880 ANeuralNetworksOperandType outputType =
4881 getOpType(operandCode, outputRank, outputDimensions);
4882 if (deviant == inputTensorCount) {
4883 scramble(&outputType);
4884 } else {
4885 scramble(&inputTypes[1 + deviant]);
4886 }
4887 OperationTestBase packTest(ANEURALNETWORKS_PACK, inputTypes, {outputType});
4888 if (adjustRank) {
4889 packTest.testFailure(ANEURALNETWORKS_BAD_DATA);
4890 } else {
4891 packTest.testSuccess();
4892 return;
4893 }
4894 }
4895 }
4896
4897 TEST(OperationValidationTest, PACK_float32_rank_good) {
4898 // Make sure packTestBadRank starts with a valid operation and only corrupts it when it intends
4899 // to.
4900 packTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, 0);
4901 }
4902
4903 TEST(OperationValidationTest, PACK_float32_rank_lo) {
4904 packTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, -1);
4905 }
4906
4907 TEST(OperationValidationTest, PACK_float32_rank_hi) {
4908 packTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, 1);
4909 }
4910
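// Validates REVERSE: a data tensor of the given type and a 1-D TENSOR_INT32 axis tensor.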
4911 void reverseTest(int32_t operandCode) {
4912 const uint32_t tensorDimensions[3] = {4, 5, 6};
4913 constexpr size_t tensorRank = sizeof(tensorDimensions) / sizeof(tensorDimensions[0]);
4914 const ANeuralNetworksOperandType tensorType =
4915 getOpType(operandCode, tensorRank, tensorDimensions);
4916
4917 const uint32_t axisDimensions[1] = {0};
4918 constexpr size_t axisRank = sizeof(axisDimensions) / sizeof(axisDimensions[0]);
4919 const ANeuralNetworksOperandType axisType =
4920 getOpType(ANEURALNETWORKS_TENSOR_INT32, axisRank, axisDimensions);
4921
4922 OperationTestBase reverseTest(ANEURALNETWORKS_REVERSE, {tensorType, axisType}, {tensorType});
4923 reverseTest.testOpsValidations();
4924 }
4925
4926 TEST(OperationValidationTest, REVERSE_float16) {
4927 reverseTest(ANEURALNETWORKS_TENSOR_FLOAT16);
4928 }
4929
4930 TEST(OperationValidationTest, REVERSE_float32) {
4931 reverseTest(ANEURALNETWORKS_TENSOR_FLOAT32);
4932 }
4933
4934 TEST(OperationValidationTest, REVERSE_quant8) {
4935 reverseTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM);
4936 }
4937
4938 TEST(OperationValidationTest, REVERSE_quant8_signed) {
4939 reverseTest(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED);
4940 }
4941
4942 TEST(OperationValidationTest, REVERSE_int32) {
4943 reverseTest(ANEURALNETWORKS_TENSOR_INT32);
4944 }
4945
4946 // Test quantization parameters that are inconsistent among operands.
4947 void reverseTestBadQuantization(int32_t operandCode, BadQuantization bad) {
4948 const uint32_t tensorDimensions[3] = {4, 5, 6};
4949 constexpr size_t tensorRank = sizeof(tensorDimensions) / sizeof(tensorDimensions[0]);
4950 const ANeuralNetworksOperandType tensorType =
4951 getOpType(operandCode, tensorRank, tensorDimensions);
4952
4953 const uint32_t axisDimensions[1] = {0};
4954 constexpr size_t axisRank = sizeof(axisDimensions) / sizeof(axisDimensions[0]);
4955 const ANeuralNetworksOperandType axisType =
4956 getOpType(ANEURALNETWORKS_TENSOR_INT32, axisRank, axisDimensions);
4957
4958 ANeuralNetworksOperandType outputType = tensorType;
4959 scramble(&outputType, bad);
4960
4961 OperationTestBase reverseTest(ANEURALNETWORKS_REVERSE, {tensorType, axisType}, {outputType});
4962 if (bad == BadQuantization::NONE) {
4963 reverseTest.testSuccess();
4964 return;
4965 } else {
4966 reverseTest.testFailure(ANEURALNETWORKS_BAD_DATA);
4967 }
4968 }
4969
4970 TEST(OperationValidationTest, REVERSE_quant8_bad_none) {
4971 // Make sure reverseTestBadQuantization starts with a valid operation and only corrupts what it
4972 // intends to.
4973 reverseTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::NONE);
4974 }
4975
4976 TEST(OperationValidationTest, REVERSE_quant8_bad_zeroPoint) {
4977 reverseTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::zeroPoint);
4978 }
4979
4980 TEST(OperationValidationTest, REVERSE_quant8_bad_scale) {
4981 reverseTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, BadQuantization::scale);
4982 }
4983
4984 TEST(OperationValidationTest, REVERSE_quant8_signed_bad_zeroPoint) {
4985 reverseTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
4986 BadQuantization::zeroPoint);
4987 }
4988
4989 TEST(OperationValidationTest, REVERSE_quant8_signed_bad_scale) {
4990 reverseTestBadQuantization(ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, BadQuantization::scale);
4991 }
4992
4993 // Test ranks that are inconsistent among operands or otherwise incorrect.
4994 void reverseTestBadRank(uint32_t operandCode, int adjustRank) {
4995 const uint32_t tensorDimensions[3] = {4, 5, 6};
4996 constexpr size_t tensorRank = sizeof(tensorDimensions) / sizeof(tensorDimensions[0]);
4997 const ANeuralNetworksOperandType tensorType =
4998 getOpType(operandCode, tensorRank, tensorDimensions);
4999
5000 const uint32_t axisDimensions[1] = {0};
5001 constexpr size_t axisRank = sizeof(axisDimensions) / sizeof(axisDimensions[0]);
5002 const ANeuralNetworksOperandType axisType =
5003 getOpType(ANEURALNETWORKS_TENSOR_INT32, axisRank, axisDimensions);
5004
5005 constexpr size_t kOperandCount = 3; // 2 inputs, 1 output
5006
5007 // The "deviant" is the operand whose rank is to be changed.
5008 for (uint32_t deviant = 0; deviant < kOperandCount; ++deviant) {
5009 SCOPED_TRACE(deviant);
5010
5011 // input 0, input 1, output 0
5012 std::vector<ANeuralNetworksOperandType> operands = {tensorType, axisType, tensorType};
5013 ASSERT_EQ(operands.size(), kOperandCount);
5014
5015 std::vector<uint32_t> scrambledDimensions;
5016 auto scramble = [adjustRank,
5017 &scrambledDimensions](ANeuralNetworksOperandType* type) -> bool {
5018 if (!adjustRank) {
5019 return true;
5020 }
5021 if (adjustRank < 0) {
5022 if (type->dimensionCount <= uint32_t(-adjustRank)) {
5023 // not a valid test scenario
5024 return false;
5025 }
5026 type->dimensionCount += adjustRank;
5027 return true;
5028 }
5029 const uint32_t oldRank = type->dimensionCount;
5030 const uint32_t newRank = oldRank + adjustRank;
5031 EXPECT_EQ(scrambledDimensions.size(), size_t(0)); // only use this vector once
5032 scrambledDimensions.assign(&type->dimensions[0], &type->dimensions[oldRank]);
5033 scrambledDimensions.resize(newRank, /* arbitrary choice */ 7);
5034 type->dimensionCount = newRank;
5035 type->dimensions = &scrambledDimensions[0];
5036 return true;
5037 };
5038
5039 if (!scramble(&operands[deviant])) {
5040 continue;
5041 }
5042 OperationTestBase reverseTest(ANEURALNETWORKS_REVERSE, {operands[0], operands[1]},
5043 {operands[2]});
5044 if (adjustRank) {
5045 reverseTest.testFailure(ANEURALNETWORKS_BAD_DATA);
5046 } else {
5047 reverseTest.testSuccess();
5048 return;
5049 }
5050 }
5051 }
5052
5053 TEST(OperationValidationTest, REVERSE_float32_rank_good) {
5054 // Make sure reverseTestBadRank starts with a valid operation and only corrupts it when it
5055 // intends to.
5056 reverseTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, 0);
5057 }
5058
5059 TEST(OperationValidationTest, REVERSE_float32_rank_lo) {
5060 reverseTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, -1);
5061 }
5062
5063 TEST(OperationValidationTest, REVERSE_float32_rank_hi) {
5064 reverseTestBadRank(ANEURALNETWORKS_TENSOR_FLOAT32, 1);
5065 }
5066
5067 } // end namespace
5068