/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/runtime/Tensor.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/operators/CpuGemmConv2d.h"

using namespace arm_compute::experimental;

namespace arm_compute
{
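// Internal state for the pImpl idiom: NEGEMMConvolutionLayer is a thin wrapper
// that delegates all work to the cpu::CpuGemmConv2d operator and keeps the
// tensor pack, memory group and auxiliary workspace needed to run it.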
struct NEGEMMConvolutionLayer::Impl
{
    const ITensor                      *weights{ nullptr };
    std::unique_ptr<cpu::CpuGemmConv2d> op{ nullptr };
    ITensorPack                         run_pack{};
    MemoryGroup                         memory_group{};
    IWeightsManager                    *weights_manager{ nullptr };
    MemoryRequirements                  aux_mem_req{};
    WorkspaceData<Tensor>               workspace_tensors{};
    bool                                is_prepared{ false };
};

NEGEMMConvolutionLayer::NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager, IWeightsManager *weights_manager)
    : _impl(std::make_unique<Impl>())
{
    _impl->weights_manager = weights_manager;
    _impl->memory_group    = MemoryGroup(memory_manager);
}
NEGEMMConvolutionLayer::~NEGEMMConvolutionLayer() = default;

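// Configures the underlying cpu::CpuGemmConv2d operator from the tensors'
// metadata, records the run-time tensor pack, and allocates the auxiliary
// workspace the operator requests via the memory group.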
void NEGEMMConvolutionLayer::configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info,
                                       const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);

    _impl->weights = weights;
    _impl->op      = std::make_unique<cpu::CpuGemmConv2d>();
    _impl->op->configure(input->info(), weights->info(), (biases != nullptr ? biases->info() : nullptr), output->info(), conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);

    _impl->run_pack =
    {
        { TensorType::ACL_SRC_0, input },
        { TensorType::ACL_SRC_1, weights },
        { TensorType::ACL_SRC_2, biases },
        { TensorType::ACL_DST, output }
    };
    _impl->aux_mem_req       = _impl->op->workspace();
    _impl->workspace_tensors = manage_workspace<Tensor>(_impl->aux_mem_req, _impl->memory_group, _impl->run_pack, _impl->run_pack);
}

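// Static validation: forwards to the operator's validate() so this function
// accepts exactly the configurations cpu::CpuGemmConv2d supports.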
Status NEGEMMConvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                        const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, bool enable_fast_math, unsigned int num_groups)
{
    return cpu::CpuGemmConv2d::validate(input, weights, biases, output, conv_info, weights_info, dilation, act_info, enable_fast_math, num_groups);
}

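// Queries whether an optimized implementation exists for the given
// configuration and, if so, reports the weight format it expects;
// forwards to cpu::CpuGemmConv2d::has_opt_impl.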
Status NEGEMMConvolutionLayer::has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                            const PadStrideInfo &conv_info,
                                            const WeightsInfo &weights_info, const Size2D &dilation, const ActivationLayerInfo &act_info, const bool enable_fast_math)
{
    return cpu::CpuGemmConv2d::has_opt_impl(expected_weight_format, src, weights, biases, dst, conv_info, weights_info, dilation, act_info, enable_fast_math);
}

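// Executes the convolution: runs one-shot preparation if needed, acquires the
// memory-group resources for the duration of the call, then runs the operator
// on the stored tensor pack.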
void NEGEMMConvolutionLayer::run()
{
    prepare();
    MemoryGroupResourceScope scope_mg(_impl->memory_group);
    _impl->op->run(_impl->run_pack);
}

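// One-shot preparation: lets the operator transform the weights, then releases
// the auxiliary tensors that are only needed during this stage.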
void NEGEMMConvolutionLayer::prepare()
{
    if(!_impl->is_prepared)
    {
        _impl->op->prepare(_impl->run_pack);

        // Release temporary tensors that are only used in prepare stage
        release_temporaries<Tensor>(_impl->aux_mem_req, _impl->workspace_tensors);
        _impl->is_prepared = true;
    }
}
} // namespace arm_compute