//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "DriverTestHelpers.hpp"
#include "TestTensor.hpp"

#include <array>
#include <log/log.h>

using namespace android::hardware;
using namespace driverTestHelpers;
using namespace armnn_driver;

using HalPolicy = hal_1_0::HalPolicy;
using RequestArgument = V1_0::RequestArgument;

namespace
{

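// Builds a model containing a single CONCATENATION operation from the given
// input tensors and axis, prepares it with the ArmNN driver for the requested
// compute device, runs one execution and compares the resulting output pool
// against expectedOutputTensor. The expected prepare/execution statuses
// default to NONE so the same helper can be reused by the failure cases below.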
void
ConcatTestImpl(const std::vector<const TestTensor*> & inputs,
               int32_t concatAxis,
               const TestTensor & expectedOutputTensor,
               armnn::Compute computeDevice,
               V1_0::ErrorStatus expectedPrepareStatus=V1_0::ErrorStatus::NONE,
               V1_0::ErrorStatus expectedExecStatus=V1_0::ErrorStatus::NONE)
{
    std::unique_ptr<ArmnnDriver> driver = std::make_unique<ArmnnDriver>(DriverOptions(computeDevice));
    HalPolicy::Model model{};

    hidl_vec<uint32_t> modelInputIds;
    modelInputIds.resize(inputs.size()+1);
    for (uint32_t i = 0; i<inputs.size(); ++i)
    {
        modelInputIds[i] = i;
        AddInputOperand<HalPolicy>(model, inputs[i]->GetDimensions());
    }
    modelInputIds[inputs.size()] = inputs.size(); // add an id for the axis too
    AddIntOperand<HalPolicy>(model, concatAxis);
    AddOutputOperand<HalPolicy>(model, expectedOutputTensor.GetDimensions());
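    // Operand layout relied on by this test: operands 0..inputs.size()-1 are
    // the input tensors (added in order above), operand inputs.size() is the
    // concatenation axis scalar, and operand inputs.size()+1 is the output.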

    // make the concat operation
    model.operations.resize(1);
    model.operations[0].type    = HalPolicy::OperationType::CONCATENATION;
    model.operations[0].inputs  = modelInputIds;
    model.operations[0].outputs = hidl_vec<uint32_t>{static_cast<uint32_t>(inputs.size()+1)};

    // make the prepared model
    V1_0::ErrorStatus prepareStatus = V1_0::ErrorStatus::NONE;
    android::sp<V1_0::IPreparedModel> preparedModel = PrepareModelWithStatus(model,
                                                                             *driver,
                                                                             prepareStatus,
                                                                             expectedPrepareStatus);
    DOCTEST_CHECK((int)prepareStatus == (int)expectedPrepareStatus);
    if (prepareStatus != V1_0::ErrorStatus::NONE)
    {
        // prepare failed, we cannot continue
        return;
    }

    DOCTEST_CHECK(preparedModel.get() != nullptr);
    if (preparedModel.get() == nullptr)
    {
        // don't spoil other tests if prepare failed
        return;
    }

    // construct the request
    hidl_vec<RequestArgument> inputArguments;
    hidl_vec<RequestArgument> outputArguments;
    inputArguments.resize(inputs.size());
    outputArguments.resize(1);

    // the request's memory pools will follow the same order as
    // the inputs
    for (uint32_t i = 0; i<inputs.size(); ++i)
    {
        V1_0::DataLocation inloc = {};
        inloc.poolIndex = i;
        inloc.offset = 0;
        inloc.length = inputs[i]->GetNumElements() * sizeof(float);
        RequestArgument input = {};
        input.location = inloc;
        input.dimensions = inputs[i]->GetDimensions();
        inputArguments[i] = input;
    }

    // and an additional memory pool is needed for the output
    {
        V1_0::DataLocation outloc = {};
        outloc.poolIndex = inputs.size();
        outloc.offset = 0;
        outloc.length = expectedOutputTensor.GetNumElements() * sizeof(float);
        RequestArgument output = {};
        output.location = outloc;
        output.dimensions = expectedOutputTensor.GetDimensions();
        outputArguments[0] = output;
    }

    // make the request based on the arguments
    V1_0::Request request = {};
    request.inputs  = inputArguments;
    request.outputs = outputArguments;

    // set the input data
    for (uint32_t i = 0; i<inputs.size(); ++i)
    {
        AddPoolAndSetData(inputs[i]->GetNumElements(),
                          request,
                          inputs[i]->GetData());
    }

    // add memory for the output
    android::sp<IMemory> outMemory = AddPoolAndGetData<float>(expectedOutputTensor.GetNumElements(), request);
    float* outdata = static_cast<float*>(static_cast<void*>(outMemory->getPointer()));
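    // Keep a raw pointer into the output pool so the values written by the
    // driver can be checked against the expected data after execution.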

    // run the execution
    DOCTEST_CHECK(preparedModel.get() != nullptr);
    auto execStatus = Execute(preparedModel, request, expectedExecStatus);
    DOCTEST_CHECK((int)execStatus == (int)expectedExecStatus);

    if (execStatus == V1_0::ErrorStatus::NONE)
    {
        // check the result if there was no error
        const float * expectedOutput = expectedOutputTensor.GetData();
        for (unsigned int i=0; i<expectedOutputTensor.GetNumElements();++i)
        {
            DOCTEST_CHECK(outdata[i] == expectedOutput[i]);
        }
    }
}

/// Test cases...
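// Each case below builds and runs the same kind of graph on the compute device
// passed in; the DOCTEST suites at the end of the file instantiate every case
// for CpuRef and, when the CL backend is compiled in, for GpuAcc.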
void SimpleConcatAxis0(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{3, 1, 1, 1}, {0, 1, 2}};
    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void ConcatAxis0NoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{2, 1, 2, 1}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{3, 1, 2, 1}, {4, 5,
                                                    6, 7,
                                                    8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};

    TestTensor expected{armnn::TensorShape{6, 1, 2, 1}, {0, 1,
                                                         2, 3,
                                                         4, 5,
                                                         6, 7,
                                                         8, 9,
                                                         10, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis1(armnn::Compute computeDevice)
{
    int32_t axis = 1;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 3, 1, 1}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void ConcatAxis1NoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 1;
    TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 3, 2, 1}, {4, 5,
                                                    6, 7,
                                                    8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10, 11}};

    TestTensor expected{armnn::TensorShape{1, 6, 2, 1}, {0, 1,
                                                         2, 3,
                                                         4, 5,
                                                         6, 7,
                                                         8, 9,
                                                         10, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis1DoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 1;
    TestTensor aIn{armnn::TensorShape{2, 2, 1, 1}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{2, 3, 1, 1}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{2, 1, 1, 1}, {10,
                                                    11}};

    TestTensor expected{armnn::TensorShape{2, 6, 1, 1}, {0, 1, 4, 5, 6, 10,
                                                         2, 3, 7, 8, 9, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis2(armnn::Compute computeDevice)
{
    int32_t axis = 2;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 1, 3, 1}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void ConcatAxis2NoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 2;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 3, 2}, {4, 5,
                                                    6, 7,
                                                    8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1, 2}, {10, 11}};

    TestTensor expected{armnn::TensorShape{1, 1, 6, 2}, {0, 1,
                                                         2, 3,
                                                         4, 5,
                                                         6, 7,
                                                         8, 9,
                                                         10, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis2DoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 2;
    TestTensor aIn{armnn::TensorShape{1, 2, 2, 1}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 2, 3, 1}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 2, 1, 1}, {10,
                                                    11}};

    TestTensor expected{armnn::TensorShape{1, 2, 6, 1}, {0, 1, 4, 5, 6, 10,
                                                         2, 3, 7, 8, 9, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis3(armnn::Compute computeDevice)
{
    int32_t axis = 3;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 1, 1, 3}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxis3DoInterleave(armnn::Compute computeDevice)
{
    int32_t axis = 3;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
                                                    11}};

    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
                                                         2, 3, 7, 8, 9, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

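// The remaining cases construct invalid models. ConcatTestImpl is called with
// an expected prepare status of GENERAL_FAILURE, so it returns before any
// execution or output comparison takes place.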
void AxisTooBig(armnn::Compute computeDevice)
{
    int32_t axis = 4;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};

    // The axis must be within the range of [-rank(values), rank(values))
    // see: https://www.tensorflow.org/api_docs/python/tf/concat
    TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
}

void AxisTooSmall(armnn::Compute computeDevice)
{
    int32_t axis = -5;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1, 1}, {0}};

    // The axis must be within the range of [-rank(values), rank(values))
    // see: https://www.tensorflow.org/api_docs/python/tf/concat
    TestTensor uncheckedOutput{armnn::TensorShape{1, 1, 1, 1}, {0}};
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn}, axis, uncheckedOutput, computeDevice, expectedParserStatus);
}

void TooFewInputs(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{1, 1, 1, 1}, {0}};

    // We need at least two tensors to concatenate
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn}, axis, aIn, computeDevice, expectedParserStatus);
}

void MismatchedInputDimensions(armnn::Compute computeDevice)
{
    int32_t axis = 3;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor mismatched{armnn::TensorShape{1, 1, 1, 1}, {10}};

    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
                                                         2, 3, 7, 8, 9, 11}};

    // The input dimensions must be compatible
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn, &mismatched}, axis, expected, computeDevice, expectedParserStatus);
}

void MismatchedInputRanks(armnn::Compute computeDevice)
{
    int32_t axis = 2;
    TestTensor aIn{armnn::TensorShape{1, 1, 2}, {0, 1}};
    TestTensor bIn{armnn::TensorShape{1, 1}, {4}};
    TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 4}};

    // The input dimensions must be compatible
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn}, axis, expected, computeDevice, expectedParserStatus);
}

void MismatchedOutputDimensions(armnn::Compute computeDevice)
{
    int32_t axis = 3;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
                                                    11}};

    TestTensor mismatched{armnn::TensorShape{1, 1, 6, 2}, {0, 1, 4, 5, 6, 10,
                                                           2, 3, 7, 8, 9, 11}};

    // The input and output dimensions must be compatible
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
}

void MismatchedOutputRank(armnn::Compute computeDevice)
{
    int32_t axis = 3;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
                                                    11}};

    TestTensor mismatched{armnn::TensorShape{6, 2}, {0, 1, 4, 5, 6, 10,
                                                     2, 3, 7, 8, 9, 11}};

    // The input and output ranks must match
    V1_0::ErrorStatus expectedParserStatus = V1_0::ErrorStatus::GENERAL_FAILURE;
    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, mismatched, computeDevice, expectedParserStatus);
}

void ValidNegativeAxis(armnn::Compute computeDevice)
{
    // this is the same as 3
    // see: https://www.tensorflow.org/api_docs/python/tf/concat
    int32_t axis = -1;
    TestTensor aIn{armnn::TensorShape{1, 1, 2, 2}, {0, 1,
                                                    2, 3}};
    TestTensor bIn{armnn::TensorShape{1, 1, 2, 3}, {4, 5, 6,
                                                    7, 8, 9}};
    TestTensor cIn{armnn::TensorShape{1, 1, 2, 1}, {10,
                                                    11}};

    TestTensor expected{armnn::TensorShape{1, 1, 2, 6}, {0, 1, 4, 5, 6, 10,
                                                         2, 3, 7, 8, 9, 11}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisZero3D(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{3, 1, 1}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisOne3D(armnn::Compute computeDevice)
{
    int32_t axis = 1;
    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 3, 1}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisTwo3D(armnn::Compute computeDevice)
{
    int32_t axis = 2;
    TestTensor aIn{armnn::TensorShape{1, 1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 1, 3}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisZero2D(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{3, 1}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisOne2D(armnn::Compute computeDevice)
{
    int32_t axis = 1;
    TestTensor aIn{armnn::TensorShape{1, 1}, {0}};
    TestTensor bIn{armnn::TensorShape{1, 1}, {1}};
    TestTensor cIn{armnn::TensorShape{1, 1}, {2}};

    TestTensor expected{armnn::TensorShape{1, 3}, {0, 1, 2}};

    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

void SimpleConcatAxisZero1D(armnn::Compute computeDevice)
{
    int32_t axis = 0;
    TestTensor aIn{armnn::TensorShape{1}, {0}};
    TestTensor bIn{armnn::TensorShape{1}, {1}};
    TestTensor cIn{armnn::TensorShape{1}, {2}};

    TestTensor expected{armnn::TensorShape{3}, {0, 1, 2}};
    ConcatTestImpl({&aIn, &bIn, &cIn}, axis, expected, computeDevice);
}

} // namespace <anonymous>

DOCTEST_TEST_SUITE("ConcatTests_CpuRef")
{

DOCTEST_TEST_CASE("SimpleConcatAxis0")
{
    SimpleConcatAxis0(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
{
    ConcatAxis0NoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis1")
{
    SimpleConcatAxis1(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
{
    ConcatAxis1NoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
{
    SimpleConcatAxis1DoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis2")
{
    SimpleConcatAxis2(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
{
    ConcatAxis2NoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
{
    SimpleConcatAxis2DoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis3")
{
    SimpleConcatAxis3(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
{
    SimpleConcatAxis3DoInterleave(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("AxisTooBig")
{
    AxisTooBig(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("AxisTooSmall")
{
    AxisTooSmall(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("TooFewInputs")
{
    TooFewInputs(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("MismatchedInputDimensions")
{
    MismatchedInputDimensions(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("MismatchedInputRanks")
{
    MismatchedInputRanks(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("MismatchedOutputDimensions")
{
    MismatchedOutputDimensions(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("MismatchedOutputRank")
{
    MismatchedOutputRank(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("ValidNegativeAxis")
{
    ValidNegativeAxis(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
{
    SimpleConcatAxisZero3D(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
{
    SimpleConcatAxisOne3D(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
{
    SimpleConcatAxisTwo3D(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
{
    SimpleConcatAxisZero2D(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
{
    SimpleConcatAxisOne2D(armnn::Compute::CpuRef);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
{
    SimpleConcatAxisZero1D(armnn::Compute::CpuRef);
}

}

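// The GpuAcc suite mirrors the CpuRef suite above and is only compiled when
// the Arm Compute CL backend is available (ARMCOMPUTECL_ENABLED).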
#ifdef ARMCOMPUTECL_ENABLED
DOCTEST_TEST_SUITE("ConcatTests_GpuAcc")
{

DOCTEST_TEST_CASE("SimpleConcatAxis0")
{
    SimpleConcatAxis0(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("ConcatAxis0NoInterleave")
{
    ConcatAxis0NoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis1")
{
    SimpleConcatAxis1(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("ConcatAxis1NoInterleave")
{
    ConcatAxis1NoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis1DoInterleave")
{
    SimpleConcatAxis1DoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis2")
{
    SimpleConcatAxis2(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("ConcatAxis2NoInterleave")
{
    ConcatAxis2NoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis2DoInterleave")
{
    SimpleConcatAxis2DoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis3")
{
    SimpleConcatAxis3(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxis3DoInterleave")
{
    SimpleConcatAxis3DoInterleave(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("AxisTooBig")
{
    AxisTooBig(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("AxisTooSmall")
{
    AxisTooSmall(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("TooFewInputs")
{
    TooFewInputs(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("MismatchedInputDimensions")
{
    MismatchedInputDimensions(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("MismatchedInputRanks")
{
    MismatchedInputRanks(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("MismatchedOutputDimensions")
{
    MismatchedOutputDimensions(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("MismatchedOutputRank")
{
    MismatchedOutputRank(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("ValidNegativeAxis")
{
    ValidNegativeAxis(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero3D")
{
    SimpleConcatAxisZero3D(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisOne3D")
{
    SimpleConcatAxisOne3D(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisTwo3D")
{
    SimpleConcatAxisTwo3D(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero2D")
{
    SimpleConcatAxisZero2D(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisOne2D")
{
    SimpleConcatAxisOne2D(armnn::Compute::GpuAcc);
}

DOCTEST_TEST_CASE("SimpleConcatAxisZero1D")
{
    SimpleConcatAxisZero1D(armnn::Compute::GpuAcc);
}

} // End of GpuAcc Test Suite
#endif