//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ConvertFp16ToFp32TestImpl.hpp"

#include <Half.hpp>


#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <armnnTestUtils/TensorHelpers.hpp>

SimpleConvertFp16ToFp32Test(armnn::IWorkloadFactory & workloadFactory,const armnn::IBackendInternal::IMemoryManagerSharedPtr & memoryManager,const armnn::ITensorHandleFactory & tensorHandleFactory)16 LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(
17 armnn::IWorkloadFactory& workloadFactory,
18 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
19 const armnn::ITensorHandleFactory& tensorHandleFactory)
20 {
21 IgnoreUnused(memoryManager);
22 using namespace half_float::literal;
23
24 const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
25 const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
26
27 std::vector<armnn::Half> input =
28 {
29 -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
30 1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
31 };
32
33 std::vector<float> expectedOutput =
34 {
35 -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
36 1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
37 };
38
39 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
40
41 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
42 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
43
44 armnn::ConvertFp16ToFp32QueueDescriptor data;
45 armnn::WorkloadInfo info;
46 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
47 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
48
49 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::ConvertFp16ToFp32,
50 data,
51 info);
52
53 inputHandle->Allocate();
54 outputHandle->Allocate();
55
56 CopyDataToITensorHandle(inputHandle.get(), input.data());
57
58 workload->Execute();
59
60 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
61
62 return LayerTestResult<float, 4>(actualOutput,
63 expectedOutput,
64 outputHandle->GetShape(),
65 outputTensorInfo.GetShape());
66 }
67