//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClL2NormalizationFloatWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
#include <armnn/backends/TensorHandle.hpp>
#include <aclCommon/ArmComputeUtils.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{
using namespace armcomputetensorutils;

arm_compute::Status ClL2NormalizationWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const L2NormalizationDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput  = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

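    // Arm Compute stores dimensions innermost-first, so an ArmNN NCHW tensor maps to an ACL shape
    // of [W, H, C, N] and an NHWC tensor to [C, W, H, N]. L2 normalization runs over the channel
    // dimension, which is therefore axis 2 for NCHW and axis 0 for NHWC.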
    int axis = (descriptor.m_DataLayout == DataLayout::NCHW) ? 2 : 0;

    return arm_compute::CLL2NormalizeLayer::validate(&aclInput, &aclOutput, axis, descriptor.m_Eps);
}

ClL2NormalizationFloatWorkload::ClL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info,
                                                               const arm_compute::CLCompileContext& clCompileContext)
    : FloatWorkload<L2NormalizationQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClL2NormalizationFloatWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Data.ValidateInputsOutputs("ClL2NormalizationFloatWorkload", 1, 1);

    arm_compute::ICLTensor& input  = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

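    // Propagate the ArmNN data layout to the underlying ACL tensor infos before configuring the layer.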
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

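    // Same channel-axis mapping as in ClL2NormalizationWorkloadValidate: 2 for NCHW, 0 for NHWC.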
    int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0;

    {
        ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "ClL2NormalizationFloatWorkload_configure");
        m_Layer.configure(clCompileContext, &input, &output, axis, m_Data.m_Parameters.m_Eps);
    }
}

void ClL2NormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL_GUID("ClL2NormalizationFloatWorkload_Execute", this->GetGuid());
    RunClFunction(m_Layer, CHECK_LOCATION());
}

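// Replace input tensor handle with the given TensorHandle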
void ClL2NormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
    this->m_Data.m_Inputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Inputs[slot] = backupHandle;
        throw e;
    }
}

// Replace output tensor handle with the given TensorHandle
void ClL2NormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
{
    ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot];
    this->m_Data.m_Outputs[slot] = tensorHandle;
    try
    {
        Reconfigure();
    }
    catch(armnn::UnimplementedException& e)
    {
        // Cannot reconfigure, revert the slot back and throw the exception.
        this->m_Data.m_Outputs[slot] = backupHandle;
        throw e;
    }
}

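// Reconfigure is not supported for this workload, so the Replace*TensorHandle helpers above will
// always restore the original handle and rethrow the UnimplementedException.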
void ClL2NormalizationFloatWorkload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}

} // namespace armnn