//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonConvertFp32ToFp16Workload.hpp"

#include <Half.hpp>
#include <Profiling.hpp>

#include <armnnUtils/FloatingPointConverter.hpp>

#include <backendsCommon/WorkloadUtils.hpp>

namespace armnn
{

NeonConvertFp32ToFp16Workload::NeonConvertFp32ToFp16Workload(const ConvertFp32ToFp16QueueDescriptor& descriptor,
                                                             const WorkloadInfo& info)
    : Float32ToFloat16Workload<ConvertFp32ToFp16QueueDescriptor>(descriptor, info)
{
    this->m_Data.ValidateInputsOutputs("NeonConvertFp32ToFp16Workload", 1, 1);
    GatherTensorHandlePairs(descriptor, m_TensorHandlePairs);
}

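// Execute converts each gathered (input, output) tensor handle pair from FP32
// to FP16. The per-pair copy is delegated to CopyTensorContentsGeneric, which
// walks the tensor contents and applies convertFunc to the raw byte buffers.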
void NeonConvertFp32ToFp16Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonConvertFp32ToFp16Workload_Execute", this->GetGuid());

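    // Reinterpret the source bytes as float and the destination bytes as Half.
    // 'size' is treated as the byte count of the FP16 destination buffer
    // ("2 bytes per fp16"), so size/2 yields the number of elements to convert.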
    auto convertFunc = [](uint8_t* dst, const uint8_t* src, size_t size)
        {
            auto input = reinterpret_cast<const float*>(src);
            auto output = reinterpret_cast<Half*>(dst);
            size_t numElements = size/2; // 2 bytes per fp16
            armnnUtils::FloatingPointConverter::ConvertFloat32To16(input, numElements, output);
        };

    for (const auto& pair : m_TensorHandlePairs)
    {
        CopyTensorContentsGeneric(pair.first, pair.second, convertFunc);
    }
}

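// Replace input tensor handle with the given TensorHandle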
void NeonConvertFp32ToFp16Workload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void NeonConvertFp32ToFp16Workload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

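// Reconfigure is not supported by this workload; the Replace*TensorHandle
// methods above rely on the UnimplementedException thrown here to revert
// the handle swap.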
void NeonConvertFp32ToFp16Workload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} //namespace armnn