//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefInstanceNormalizationWorkload.hpp"

#include "InstanceNorm.hpp"
#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

namespace armnn
{

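// Reference (CpuRef) workload for InstanceNormalization. All configuration
// (scale, offset, epsilon, data layout) travels in the queue descriptor,
// which the RefBaseWorkload base class stores as m_Data.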
RefInstanceNormalizationWorkload::RefInstanceNormalizationWorkload(
    const InstanceNormalizationQueueDescriptor& descriptor,
    const WorkloadInfo& info)
    : RefBaseWorkload<InstanceNormalizationQueueDescriptor>(descriptor, info) {}

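// Synchronous execution: runs on the tensor handles that were captured in
// the queue descriptor when the workload was constructed.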
void RefInstanceNormalizationWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

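// Asynchronous execution: the tensor handles come from the caller-supplied
// working memory descriptor rather than from m_Data, allowing the same
// workload to be driven over different buffers.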
void RefInstanceNormalizationWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

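// Shared implementation: decodes the input tensor to float, runs the
// reference InstanceNorm kernel, and encodes the result back into the
// output tensor.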
void RefInstanceNormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs,
                                               std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefInstanceNormalizationWorkload_Execute");

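    // Decoder/Encoder abstract over the tensors' underlying data types,
    // so the kernel below can operate purely on float values.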
    std::unique_ptr<Decoder<float>> inputDecoder  = MakeDecoder<float>(GetTensorInfo(inputs[0]),
                                                                       inputs[0]->Map());
    std::unique_ptr<Encoder<float>> outputEncoder = MakeEncoder<float>(GetTensorInfo(outputs[0]),
                                                                       outputs[0]->Map());
    const TensorInfo& inputInfo = GetTensorInfo(inputs[0]);

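    // InstanceNorm (InstanceNorm.hpp) normalises each instance per channel
    // over its spatial dimensions, using the parameters held in m_Data.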
    InstanceNorm(m_Data, inputInfo, *inputDecoder, *outputEncoder);
}

} // namespace armnn