//
// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <CommonTestUtils.hpp>

#include <Graph.hpp>

#include <armnn/backends/TensorHandle.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include <doctest/doctest.h>

using namespace armnn;
using namespace std;

/////////////////////////////////////////////////////////////////////////////////////////////
// The following tests are created specifically to test the ReleaseConstantData() method in the Layer class.
// They build very simple graphs that include the layer to be checked.
// Weights and biases are checked before and after the method is called.
/////////////////////////////////////////////////////////////////////////////////////////////
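//
// Two outcomes are exercised below:
//   - BatchNormalizationLayer owns its constant tensor handles (mean, variance, beta, gamma)
//     directly, so ReleaseConstantData() frees them and they become null.
//   - The convolution and fully connected layers receive their weights and biases as inputs
//     from ConstantLayers, so calling ReleaseConstantData() on those layers leaves the
//     ConstantLayers' m_LayerOutput handles intact.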

TEST_SUITE("LayerReleaseConstantDataTest")
{
TEST_CASE("ReleaseBatchNormalizationLayerConstantDataTest")
{
    Graph graph;

    // create the layer we're testing
    BatchNormalizationDescriptor layerDesc;
    layerDesc.m_Eps = 0.05f;
    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");

    armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
    layer->m_Mean     = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Variance = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Beta     = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Gamma    = std::make_unique<ScopedTensorHandle>(weightInfo);
    layer->m_Mean->Allocate();
    layer->m_Variance->Allocate();
    layer->m_Beta->Allocate();
    layer->m_Gamma->Allocate();

    // create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
    Connect(input, layer, tensorInfo);
    Connect(layer, output, tensorInfo);

    // check that the constants are not null
    CHECK(layer->m_Mean != nullptr);
    CHECK(layer->m_Variance != nullptr);
    CHECK(layer->m_Beta != nullptr);
    CHECK(layer->m_Gamma != nullptr);

    // free up the constants
    layer->ReleaseConstantData();

    // check that the constants are now null
    CHECK(layer->m_Mean == nullptr);
    CHECK(layer->m_Variance == nullptr);
    CHECK(layer->m_Beta == nullptr);
    CHECK(layer->m_Gamma == nullptr);

}

TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
{
    Graph graph;

    // create the layer we're testing
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;

    auto* const convolutionLayer = graph.AddLayer<Convolution2dLayer>(layerDesc, "convolution");
    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");

    TensorInfo weightsInfo = TensorInfo({ 2, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
    TensorInfo biasInfo = TensorInfo({ 2 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);

    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);

    // create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, convolutionLayer, TensorInfo({ 2, 3, 8, 16 }, armnn::DataType::Float32));
    weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
    Connect(convolutionLayer, output, TensorInfo({ 2, 2, 2, 10 }, armnn::DataType::Float32));

    // check that the constants are not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);

    // release the constants: the convolution layer does not own them, so this should be a no-op
    convolutionLayer->ReleaseConstantData();

    // check that the constants are still not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);
}

TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
{
    Graph graph;

    // create the layer we're testing
    DepthwiseConvolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft         = 3;
    layerDesc.m_PadRight        = 3;
    layerDesc.m_PadTop          = 1;
    layerDesc.m_PadBottom       = 1;
    layerDesc.m_StrideX         = 2;
    layerDesc.m_StrideY         = 4;
    layerDesc.m_BiasEnabled     = true;

    auto* const convolutionLayer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "convolution");
    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");

    TensorInfo weightsInfo = TensorInfo({ 3, 3, 5, 3 }, armnn::DataType::Float32, 1.0, 0.0, true);
    TensorInfo biasInfo = TensorInfo({ 9 }, GetBiasDataType(armnn::DataType::Float32), 1.0, 0.0, true);

    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);

    // create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, convolutionLayer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
    weightsLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot().Connect(convolutionLayer->GetInputSlot(2));
    Connect(convolutionLayer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));

    // check that the constants are not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);

    // release the constants: the depthwise convolution layer does not own them, so this should be a no-op
    convolutionLayer->ReleaseConstantData();

    // check that the constants are still not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);
}

TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
{
    Graph graph;

    // create the layer we're testing
    FullyConnectedDescriptor layerDesc;
    layerDesc.m_BiasEnabled = true;
    layerDesc.m_TransposeWeightMatrix = true;

    auto* const fullyConnectedLayer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
    auto* const weightsLayer = graph.AddLayer<ConstantLayer>("weights");
    auto* const biasLayer = graph.AddLayer<ConstantLayer>("bias");

    float inputsQScale = 1.0f;
    float outputQScale = 2.0f;

    TensorInfo weightsInfo = TensorInfo({ 7, 20 }, DataType::QAsymmU8, inputsQScale, 0.0, true);
    TensorInfo biasInfo = TensorInfo({ 7 }, GetBiasDataType(DataType::QAsymmU8), inputsQScale, 0.0, true);

    weightsLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(weightsInfo);
    biasLayer->m_LayerOutput = std::make_shared<ScopedTensorHandle>(biasInfo);

    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightsInfo);
    biasLayer->GetOutputSlot(0).SetTensorInfo(biasInfo);

    // create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect up
    Connect(input, fullyConnectedLayer, TensorInfo({ 3, 1, 4, 5 }, DataType::QAsymmU8, inputsQScale));
    weightsLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(1));
    biasLayer->GetOutputSlot().Connect(fullyConnectedLayer->GetInputSlot(2));
    Connect(fullyConnectedLayer, output, TensorInfo({ 3, 7 }, DataType::QAsymmU8, outputQScale));

    // check that the constants are not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);

    // release the constants: the fully connected layer does not own them, so this should be a no-op
    fullyConnectedLayer->ReleaseConstantData();

    // check that the constants are still not null
    CHECK(weightsLayer->m_LayerOutput != nullptr);
    CHECK(biasLayer->m_LayerOutput != nullptr);
}

}