//
// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvolutionTestHelper.hpp"

#include <armnn_delegate.hpp>

#include <flatbuffers/flatbuffers.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/model.h>
#include <schema_generated.h>
#include <tensorflow/lite/version.h>

#include <doctest/doctest.h>

namespace armnnDelegate
{

void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 5, 5, 1 };
    std::vector<int32_t> filterShape { 1, 3, 3, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

    static std::vector<float> inputValues =
        {
            1, 5, 2, 3, 5,
            8, 7, 3, 6, 3,
            3, 3, 9, 1, 9,
            4, 1, 8, 1, 3,
            6, 8, 1, 9, 2
        };

    std::vector<float> filterValues =
        {
            4, 5, 6,
            0, 0, 0,
            3, 2, 1
        };

    std::vector<float> biasValues = { 0 };

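    // With SAME padding and stride 2, the 5x5 input produces a 3x3 output (one row/column of zero
    // padding on each side). For example, the top-left output reduces to 8*2 + 7*1 = 23, because the
    // padded zeros and the filter's middle row of zeros cancel the remaining taps.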
    std::vector<float> expectedOutputValues =
        {
            23, 33, 24,
            91, 99, 48,
            26, 50, 19
        };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
                           ::tflite::TensorType_FLOAT32,
                           2, // strideX
                           2, // strideY
                           1, // dilationX
                           1, // dilationY
                           padding,
                           tflite::ActivationFunctionType_NONE,
                           backends,
                           inputShape,
                           filterShape,
                           outputShape,
                           inputValues,
                           filterValues,
                           expectedOutputValues,
                           biasShape,
                           biasValues);
}

void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };

    std::vector<int8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 10 };

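    // SAME padding with a 2x2 kernel pads one column on the right and one row at the bottom, so the
    // out-of-range taps in the expressions below contribute zero. Each expression also folds in the
    // bias (10) and a division by 2 for output quantisation (the Uint8 tests below pass an output
    // scale of 2 explicitly).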
    std::vector<int8_t> expectedOutputValues =
        {
            (1 * 2 + 2 * 1 + 3 * 0 + 4 * 6 + 10) / 2, // 19
            (2 * 2 + 0 * 1 + 4 * 0 + 0 * 6 + 10) / 2, // 7
            (3 * 2 + 4 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 10
            (4 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10) / 2  // 9
        };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                     ::tflite::TensorType_INT8,
                                     1, // strideX
                                     1, // strideY
                                     1, // dilationX
                                     1, // dilationY
                                     padding,
                                     tflite::ActivationFunctionType_NONE,
                                     backends,
                                     inputShape,
                                     filterShape,
                                     outputShape,
                                     inputValues,
                                     filterValues,
                                     expectedOutputValues,
                                     biasShape,
                                     biasValues);
}

void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<uint8_t> inputValues = { 1, 2, 4, 8 };

    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 16 };

    // Factors to consider:
    // - the filter zero point is non-zero, hence the (x - fz) terms
    // - the output scale is 2, hence the /2
    // - the output zero point is non-zero, hence the + outZero
    // - RELU cuts negative values, and then the output zero point is added (hence the max against outZero)
    uint8_t bias = 16;
    uint8_t outZero = 20;
    uint8_t fz = 4; // filter zero point

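    // For example, the first output is max(20, (1*(2-4) + 2*(1-4) + 4*(0-4) + 8*(6-4) + 16)/2 + 20)
    // = max(20, 8/2 + 20) = 24.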
    std::vector<uint8_t> expectedOutputValues =
        {
            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
        };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                      ::tflite::TensorType_UINT8,
                                      1, // strideX
                                      1, // strideY
                                      1, // dilationX
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU,
                                      backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
                                      inputValues,
                                      filterValues,
                                      expectedOutputValues,
                                      biasShape,
                                      biasValues,
                                      {1.0f}, // biasScale
                                      {0},    // biasOffset
                                      {1.0f}, // filterScale
                                      {4},    // filterOffsets
                                      2,      // output scale
                                      20);    // output offset
}

void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> biasShape { 1 };
    std::vector<int32_t> outputShape { 1, 2, 2, 1 };

    static std::vector<uint8_t> inputValues = { 1, 2, 4, 1 };

    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };

    std::vector<int32_t> biasValues = { 0 };

    // Factors to consider:
    // - the output scale is 2, hence the /2
    // - RELU6 cuts output values at +6
    uint8_t relu6Min = 6 / 2; // divide by output scale

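    // relu6Min is the quantised clamp ceiling: the RELU6 cap of 6 divided by the output scale of 2
    // gives 3, and std::min applies it as the upper bound in the expressions below.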
    std::vector<uint8_t> expectedOutputValues =
        {
            std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 2 * 1 + 4 * 0 + 1 * 6) / 2)),
            std::min(relu6Min, static_cast<uint8_t>((2 * 2 + 0 * 1 + 1 * 0 + 0 * 6) / 2)),
            std::min(relu6Min, static_cast<uint8_t>((4 * 2 + 1 * 1 + 0 * 0 + 0 * 6) / 2)),
            std::min(relu6Min, static_cast<uint8_t>((1 * 2 + 0 * 1 + 0 * 0 + 0 * 6) / 2))
        };

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                      ::tflite::TensorType_UINT8,
                                      1, // strideX
                                      1, // strideY
                                      1, // dilationX
                                      1, // dilationY
                                      padding,
                                      tflite::ActivationFunctionType_RELU6,
                                      backends,
                                      inputShape,
                                      filterShape,
                                      outputShape,
                                      inputValues,
                                      filterValues,
                                      expectedOutputValues,
                                      biasShape,
                                      biasValues);
}


void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> inputShape  { 1,4,4,2 };
    std::vector<int32_t> filterShape { 4,2,2,2 };
    std::vector<int32_t> biasShape   { 4 };
    std::vector<int32_t> outputShape { 1,4,4,4 };

    static std::vector<int8_t> inputValues =
        {
            -11, 40,-26, 11,-28,  8,  0, -8,
            -10, 34, 47,  0,-33,-14, 28, 35,
              6,-28,-26,  8, 13, 33,-31,-41,
             31,-20,-31,-16,  8,-18,-44,  0
        };

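    // The filter is quantised per output channel along axis 0 of its [4,2,2,2] (OHWI) shape, so
    // there is one scale per output channel. Each bias scale below is the product of the input
    // scale (0.388235) and the corresponding filter scale.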
    std::vector<float>  filterScales = { 1.858268, 2.0, 1.992126, 1.905512 };
    int32_t filterQuantizationDim    = 0;
    std::vector<int8_t> filterValues =
        {
             13,-44,  5,-14, 21,-45, 36,-25,
            -42, -2, 24,-30,-31, 35, 43,-30,
            -20, -5, 25, 17, 18, 20,  4,-46,
            -49,  9, -3,-20, 46,  5,  7,-15
        };

    std::vector<int32_t> biasValues = { 0,0,0,0 };
    std::vector<float>   biasScales = { 0.721445, 0.7764700055, 0.773414, 0.739787 };

    std::vector<int8_t> expectedOutputValues =
        {
               -1,  9,  3, 5, 1, -1,  5,  9,
                2,  7, -1, 2, 2,  4,  5,  6,
                1,  1,  4, 4, 2,  0, -4, -3,
                0,  6, 12, 6, 3,  0, -1, -2,
                7, -4,  4, 4, 3,  6,  6,  2,
                0, -3, -1, 4, 4,  8,  3,  1,
                5,  0,  0, 1, 4,  7,  4,  6,
                4,  0,  1, 2, 2,  7,  5,  7
        };
    float outputQuantScale  = 401.960785f;
    int   outputQuantOffset = 3;
    float inputQuantScale   = 0.388235f;
    int   inputQuantOffset  = 1;

    tflite::Padding padding = tflite::Padding_SAME;

    ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
                                     ::tflite::TensorType_INT8,
                                     1, // strideX
                                     1, // strideY
                                     1, // dilationX
                                     1, // dilationY
                                     padding,
                                     tflite::ActivationFunctionType_NONE,
                                     backends,
                                     inputShape,
                                     filterShape,
                                     outputShape,
                                     inputValues,
                                     filterValues,
                                     expectedOutputValues,
                                     biasShape,
                                     biasValues,
                                     biasScales,
                                     {0,0,0,0},
                                     filterScales,
                                     {0,0,0,0},
                                     outputQuantScale,
                                     outputQuantOffset,
                                     inputQuantScale,
                                     inputQuantOffset,
                                     1, // depth_multiplier is ignored for CONV_2D, so the value does not matter
                                     filterQuantizationDim);
}

TEST_SUITE("Convolution2dTest_CpuRefTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Int8_CpuRef_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DWithBiasesInt8Test(backends);
}

TEST_CASE ("Conv2DPerChannel_Int8_CpuRef_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
    Conv2DPerChannelInt8Test(backends);
}

} // End of TEST_SUITE("Convolution2dTest_CpuRefTests")

TEST_SUITE("Convolution2dTest_CpuAccTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Int8_CpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DWithBiasesInt8Test(backends);
}

TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    Conv2DPerChannelInt8Test(backends);
}

} // End of TEST_SUITE("Convolution2dTest_CpuAccTests")

TEST_SUITE("Convolution2dTest_GpuAccTests")
{

TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesFp32Test(backends);
}

TEST_CASE ("Conv2DWithBiases_Int8_GpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DWithBiasesInt8Test(backends);
}

TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    Conv2DPerChannelInt8Test(backends);
}

} // End of TEST_SUITE("Convolution2dTest_GpuAccTests")

void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
{
    // Set input data
    std::vector<int32_t> transposeTensorShape { 4 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

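    // TFLite's TRANSPOSE_CONV takes the output shape as its first input tensor: transposeTensorShape
    // { 4 } describes that 1-D tensor and transposeData holds the actual output shape { 1, 3, 3, 1 }.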
    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
    static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
    std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
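    // With VALID padding and stride 1, each input element scatters a scaled copy of the 2x2 filter
    // into the 3x3 output; e.g. output(1,1) = 1*4 + 2*2 + 3*1 + 4*0 = 11.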
    std::vector<int8_t> expectedOutputValues =
        {
            0, 1,  2,
            2, 11, 12,
            6, 20, 16
        };

    tflite::Padding padding = tflite::Padding_VALID;
    TransposeConvTest<int8_t>(backends,
                              ::tflite::TensorType_INT8,
                              1, // strideX
                              1, // strideY
                              padding,
                              transposeTensorShape,
                              filterShape,
                              inputShape,
                              outputShape,
                              transposeData,
                              filterValues,
                              inputValues,
                              expectedOutputValues);
}

void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
{
    std::vector<int32_t> transposeTensorShape { 4 };
    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
    std::vector<int32_t> outputShape { 1, 3, 3, 1 };

    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
    static std::vector<float> inputValues = { 1, 2, 3, 4 };
    std::vector<float> filterValues = { 0, 1, 2, 4 };
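    // Same shapes, weights and expected results as TransposeConvInt8Test above, exercised in float.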
    std::vector<float> expectedOutputValues =
        {
            0, 1,  2,
            2, 11, 12,
            6, 20, 16
        };

    tflite::Padding padding = tflite::Padding_VALID;
    TransposeConvTest<float>(backends,
                             ::tflite::TensorType_FLOAT32,
                             1, // strideX
                             1, // strideY
                             padding,
                             transposeTensorShape,
                             filterShape,
                             inputShape,
                             outputShape,
                             transposeData,
                             filterValues,
                             inputValues,
                             expectedOutputValues);
}

TEST_SUITE("TransposeConv_CpuRef_Test")
{

TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
    TransposeConvInt8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_CpuRef_Test)

TEST_SUITE("TransposeConv_CpuAcc_Test")
{

TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
    TransposeConvInt8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)

TEST_SUITE("TransposeConv_GpuAcc_Test")
{

TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    TransposeConvFp32Test(backends);
}

TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
{
    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
    TransposeConvInt8Test(backends);
}

} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)

} // namespace armnnDelegate