//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <TestUtils.hpp>

#include <Optimizer.hpp>

// Declares armnnUtils::FloatingPointConverter, used below to create the Float16 weight data.
#include <armnnUtils/FloatingPointConverter.hpp>

#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{
using namespace armnn::optimizations;

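// ConvertConstantsHalfToFloat should rewrite the Float16 constant weights in the graph to
// Float32 without changing their numerical values.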
TEST_CASE("ConvertConstantsHalfToFloatTest")
{
    armnn::Graph graph;

    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);

    // Create the half-precision weight data
    unsigned int dims[] = { 4, 1, 1, 1 };
    std::vector<float> convWeightsData{ 1.f, 2.f, 3.f, 4.f };
    std::vector<uint16_t> halfWeights(4);
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
                                                           halfWeights.data());
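    // halfWeights now holds the raw IEEE 754 half-precision bit patterns for 1.0f..4.0f;
    // the uint16_t elements are only 16-bit storage for those values. The trailing 'true'
    // below marks the weight TensorInfo as constant.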
    armnn::TensorInfo weightInfo = armnn::TensorInfo(4, dims, armnn::DataType::Float16, 0.0f, 0, true);
    armnn::ConstTensor weights(weightInfo, halfWeights);

    // Create the simple test network
    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
    input->GetOutputSlot().SetTensorInfo(info);

    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
    fc->GetOutputSlot().SetTensorInfo(info);

    auto weightsLayer = graph.AddLayer<armnn::ConstantLayer>("weights");
    weightsLayer->m_LayerOutput = std::make_unique<armnn::ScopedTensorHandle>(weights);
    weightsLayer->GetOutputSlot(0).SetTensorInfo(weightInfo);

    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");

    // Connect up the layers
    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
    weightsLayer->GetOutputSlot().Connect(fc->GetInputSlot(1));
    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
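    // Resulting graph: input and the Float16 constant weights feed fc, which feeds output.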

    // Check the weights are Float16 before the optimization runs
    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float16);

    // Run the optimizer
    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));

    // Check the weights have been converted to Float32
    CHECK(weightsLayer->m_LayerOutput->GetTensorInfo().GetDataType() == armnn::DataType::Float32);

    // Check the converted Float32 data matches the original values
    const float* data = weightsLayer->m_LayerOutput->GetConstTensor<float>();
    CHECK(1.0f == data[0]);
    CHECK(2.0f == data[1]);
    CHECK(3.0f == data[2]);
    CHECK(4.0f == data[3]);
}
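
// A minimal companion sketch, not part of the original test: it assumes
// armnnUtils::FloatingPointConverter::ConvertFloat16To32 is available and illustrates why the
// CHECKs above can compare exactly - small integers such as 1.0f..4.0f survive the
// Float32 -> Float16 -> Float32 round trip without loss.
TEST_CASE("ConvertConstantsHalfToFloatRoundTripSketch")
{
    const std::vector<float> source{ 1.f, 2.f, 3.f, 4.f };
    std::vector<uint16_t> half(source.size());
    std::vector<float> roundTrip(source.size());

    // Convert to half precision and straight back to single precision.
    armnnUtils::FloatingPointConverter::ConvertFloat32To16(source.data(), source.size(), half.data());
    armnnUtils::FloatingPointConverter::ConvertFloat16To32(half.data(), half.size(), roundTrip.data());

    for (std::size_t i = 0; i < source.size(); ++i)
    {
        CHECK(source[i] == roundTrip[i]);
    }
}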

}