//
// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "FullyConnectedTestHelper.hpp"

namespace
{

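// The helpers below use FullyConnectedTest from FullyConnectedTestHelper.hpp to build a single
// FULLY_CONNECTED TfLite model, run it through the ArmNN delegate on the given backends and
// compare the result against the expected output values.
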
void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
{
    std::vector<int32_t> inputTensorShape   { 1, 4, 1, 1 };
    std::vector<int32_t> weightsTensorShape { 1, 4 };
    std::vector<int32_t> biasTensorShape    { 1 };
    std::vector<int32_t> outputTensorShape  { 1, 1 };

    std::vector<float> inputValues = { 10, 20, 30, 40 };
    std::vector<float> weightsData = { 2, 3, 4, 5 };

    std::vector<float> expectedOutputValues = { (400 + 10) };

    // bias is set to std::vector<float> biasData = { 10 } in the model
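    // expected output: 10 * 2 + 20 * 3 + 30 * 4 + 40 * 5 = 400, plus the bias of 10 = 410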
    FullyConnectedTest<float>(backends,
                              ::tflite::TensorType_FLOAT32,
                              tflite::ActivationFunctionType_NONE,
                              inputTensorShape,
                              weightsTensorShape,
                              biasTensorShape,
                              outputTensorShape,
                              inputValues,
                              expectedOutputValues,
                              weightsData,
                              constantWeights);
}

void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
{
    std::vector<int32_t> inputTensorShape   { 1, 4, 1, 1 };
    std::vector<int32_t> weightsTensorShape { 1, 4 };
    std::vector<int32_t> biasTensorShape    { 1 };
    std::vector<int32_t> outputTensorShape  { 1, 1 };

    std::vector<float> inputValues = { -10, 20, 30, 40 };
    std::vector<float> weightsData = { 2, 3, 4, -5 };

    std::vector<float> expectedOutputValues = { 0 };

    // bias is set to std::vector<float> biasData = { 10 } in the model
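    // expected output: -10 * 2 + 20 * 3 + 30 * 4 + 40 * -5 = -40, plus the bias of 10 = -30, clamped to 0 by RELU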
    FullyConnectedTest<float>(backends,
                              ::tflite::TensorType_FLOAT32,
                              tflite::ActivationFunctionType_RELU,
                              inputTensorShape,
                              weightsTensorShape,
                              biasTensorShape,
                              outputTensorShape,
                              inputValues,
                              expectedOutputValues,
                              weightsData,
                              constantWeights);
}

void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
{
    std::vector<int32_t> inputTensorShape   { 1, 4, 2, 1 };
    std::vector<int32_t> weightsTensorShape { 1, 4 };
    std::vector<int32_t> biasTensorShape    { 1 };
    std::vector<int32_t> outputTensorShape  { 2, 1 };

    std::vector<int8_t> inputValues = { 1, 2, 3, 4, 5, 10, 15, 20 };
    std::vector<int8_t> weightsData = { 2, 3, 4, 5 };

    std::vector<int8_t> expectedOutputValues = { 25, 105 };  // (40 + 10) / 2, (200 + 10) / 2

    // bias is set to std::vector<int32_t> biasData = { 10 } in the model
    // input and weights use quantization scale 1.0f and offset 0 in the model
    // output uses quantization scale 2.0f and offset 0 in the model
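    // The eight input values are treated as two batches of four, so each batch produces one output:
    // (1*2 + 2*3 + 3*4 + 4*5 + 10) / 2 = 25 and (5*2 + 10*3 + 15*4 + 20*5 + 10) / 2 = 105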
    FullyConnectedTest<int8_t>(backends,
                               ::tflite::TensorType_INT8,
                               tflite::ActivationFunctionType_NONE,
                               inputTensorShape,
                               weightsTensorShape,
                               biasTensorShape,
                               outputTensorShape,
                               inputValues,
                               expectedOutputValues,
                               weightsData,
                               constantWeights);
}

TEST_SUITE("FullyConnected_GpuAccTests")
{

TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    FullyConnectedFp32Test(backends);
}

TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    FullyConnectedInt8Test(backends);
}

TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
    FullyConnectedActivationTest(backends);
}

} // End of TEST_SUITE("FullyConnected_GpuAccTests")

TEST_SUITE("FullyConnected_CpuAccTests")
{

TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    FullyConnectedFp32Test(backends);
}

TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    FullyConnectedInt8Test(backends);
}

TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
    FullyConnectedActivationTest(backends);
}

} // End of TEST_SUITE("FullyConnected_CpuAccTests")

TEST_SUITE("FullyConnected_CpuRefTests")
{

TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedFp32Test(backends);
}

TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedInt8Test(backends);
}

TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedActivationTest(backends);
}

TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedFp32Test(backends, false);
}

TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedInt8Test(backends, false);
}

TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_CpuRef_Test")
{
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    FullyConnectedActivationTest(backends, false);
}

} // End of TEST_SUITE("FullyConnected_CpuRefTests")

} // anonymous namespace