//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonExpWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

namespace armnn
{

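// Asks Arm Compute Library whether a Neon element-wise exponential (NEExpLayer)
// is supported for the given input and output tensor infos.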
arm_compute::Status NeonExpWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInput  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    return arm_compute::NEExpLayer::validate(&aclInput, &aclOutput);
}

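// Configures an ACL NEExpLayer to compute exp(x) element-wise, reading from the
// single input tensor handle and writing to the single output tensor handle.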
NeonExpWorkload::NeonExpWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
    : NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
    // Report Profiling Details
    ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonExpWorkload_Construct",
                                         descriptor.m_Parameters,
                                         info,
                                         this->GetGuid());

    m_Data.ValidateInputsOutputs("NeonExpWorkload", 1, 1);

    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_ExpLayer.configure(&input, &output);
}

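// Runs the configured NEExpLayer; the scoped profiling event is tagged with this
// workload's GUID.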
void NeonExpWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON_GUID("NeonExpWorkload_Execute", this->GetGuid());
    m_ExpLayer.run();
}

} // namespace armnn