//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefSliceWorkload.hpp"

#include "RefWorkloadUtils.hpp"
#include "Slice.hpp"

#include <Profiling.hpp>

namespace armnn
{

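// Synchronous execution entry point: runs the slice over the tensor handles
// that were bound to this workload's queue descriptor at construction time.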
void RefSliceWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

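// Asynchronous execution entry point: the tensor handles come from the caller's
// working-memory descriptor rather than from m_Data, so the same workload object
// can be run with different working memory per invocation.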
void RefSliceWorkload::ExecuteAsync(ExecutionData& executionData)
{
    WorkingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data);
    Execute(workingMemDescriptor->m_Inputs, workingMemDescriptor->m_Outputs);
}

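// Shared implementation used by both execution paths above.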
void RefSliceWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefSliceWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);

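    // Delegate to the reference Slice implementation. Passing the element size of the
    // input data type lets Slice copy elements byte-wise, independent of the data type.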
    Slice(inputInfo,
          m_Data.m_Parameters,
          inputs[0]->Map(),
          outputs[0]->Map(),
          GetDataTypeSize(inputInfo.GetDataType()));
}

} // namespace armnn