xref: /aosp_15_r20/external/ComputeLibrary/src/gpu/cl/operators/ClIndirectConv2d.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #include "src/gpu/cl/operators/ClIndirectConv2d.h"
25 
26 #include "arm_compute/core/KernelDescriptors.h"
27 #include "arm_compute/core/Types.h"
28 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
29 #include "arm_compute/runtime/CL/CLScheduler.h"
30 #include "src/gpu/cl/kernels/ClIndirectConv2dAddressPrecalculationKernel.h"
31 #include "src/gpu/cl/kernels/ClIndirectConv2dKernel.h"
32 #include "src/runtime/heuristics/indirect_conv/ClIndirectConvKernelConfig.h"
33 #include "src/runtime/heuristics/indirect_conv/IClIndirectConvKernelConfig.h"
34 
35 #include "src/core/helpers/MemoryHelpers.h"
36 #include "src/gpu/cl/utils/ClAuxTensorHandler.h"
37 
38 #include "src/common/utils/Log.h"
39 
40 using namespace arm_compute::cl_indirect_conv;
41 
42 namespace arm_compute
43 {
44 namespace opencl
45 {
46 using namespace arm_compute::experimental;
47 
48 namespace
49 {
config_indirect_convolution_nhwc(const ITensorInfo * src,const ITensorInfo * weights,const PadStrideInfo & conv_info)50 DirectConvComputeKernelInfo config_indirect_convolution_nhwc(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info)
51 {
52     // Get GPU target
53     GPUTarget gpu_target = CLScheduler::get().target();
54 
55     std::unique_ptr<IClIndirectConvKernelConfig> t = ClIndirectConvKernelConfigurationFactory::create(gpu_target);
56 
57     return t->configure(src, weights, conv_info);
58 }
59 
60 } // namespace
61 
configure(const CLCompileContext & compile_context,ITensorInfo * src,ITensorInfo * weights,ITensorInfo * biases,ITensorInfo * dst,const PadStrideInfo & conv_info,const ActivationLayerInfo & act_info)62 void ClIndirectConv2d::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
63                                  const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
64 {
65     ARM_COMPUTE_ERROR_ON_NULLPTR(src);
66     ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, conv_info, act_info);
67 
68     // Reuse the direct convolution descriptor
69     const DirectConvComputeKernelInfo desc = config_indirect_convolution_nhwc(src, weights, conv_info);
70 
71     // Configure indirect convolution kernels
72     auto k0 = std::make_unique<kernels::ClIndirectConv2dAddressPrecalculationKernel>();
73     auto k1 = std::make_unique<kernels::ClIndirectConv2dKernel>();
74 
75     k0->set_target(CLScheduler::get().target());
76     k1->set_target(CLScheduler::get().target());
77 
78     k0->configure(compile_context, src, weights, &_indirect_buffer, conv_info, desc);
79     k1->configure(compile_context, src, weights, biases, &_indirect_buffer, dst, conv_info, act_info, desc);
80 
81     _addr_precalculation_kernel = std::move(k0);
82     _indirect_conv_kernel       = std::move(k1);
83     _is_prepared                = false;
84 
85     // Tune kernels
86     CLScheduler::get().tune_kernel_static(*_indirect_conv_kernel);
87 
88     // Request memory for the indirect buffer
89     _aux_mem[IndirectBuffer] = MemoryInfo(offset_int_vec(IndirectBuffer), MemoryLifetime::Persistent, _indirect_buffer.total_size());
90 }
91 
validate(const ITensorInfo * src,const ITensorInfo * weights,const ITensorInfo * biases,const ITensorInfo * dst,const PadStrideInfo & conv_info,const ActivationLayerInfo & act_info)92 Status ClIndirectConv2d::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
93                                   const PadStrideInfo &conv_info, const ActivationLayerInfo &act_info)
94 {
95     // Initialize the direct convolution descriptor
96     const DirectConvComputeKernelInfo desc = config_indirect_convolution_nhwc(src, weights, conv_info);
97 
98     TensorShape ind_buffer_shape = misc::shape_calculator::compute_indirect_buffer_shape(src->tensor_shape(),
99                                                                                          src->data_layout(),
100                                                                                          weights->tensor_shape(),
101                                                                                          conv_info,
102                                                                                          desc);
103 
104     TensorInfo indirect_buffer(ind_buffer_shape, 1, DataType::S32);
105 
106     ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClIndirectConv2dAddressPrecalculationKernel::validate(src, weights, &indirect_buffer, conv_info, desc));
107     ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClIndirectConv2dKernel::validate(src, weights, biases, &indirect_buffer, dst, conv_info, act_info, desc));
108 
109     return Status{};
110 }
111 
run(ITensorPack & tensors)112 void ClIndirectConv2d::run(ITensorPack &tensors)
113 {
114     CLAuxTensorHandler indirect_buffer(offset_int_vec(IndirectBuffer), _indirect_buffer, tensors, true);
115 
116     prepare(tensors);
117 
118     ITensorPack indirect_conv2d_pack(tensors);
119     indirect_conv2d_pack.add_const_tensor(ACL_SRC_3, indirect_buffer.get());
120 
121     // Run indirect convolution
122     CLScheduler::get().enqueue_op(*_indirect_conv_kernel, indirect_conv2d_pack, true);
123 }
124 
prepare(ITensorPack & constants)125 void ClIndirectConv2d::prepare(ITensorPack &constants)
126 {
127     if(!_is_prepared)
128     {
129         ICLTensor *indirect_buffer_aux = utils::cast::polymorphic_downcast<ICLTensor *>(constants.get_tensor(offset_int_vec(IndirectBuffer)));
130         ARM_COMPUTE_ERROR_ON(indirect_buffer_aux == nullptr);
131 
132         ARM_COMPUTE_LOG_INFO_WITH_FUNCNAME_ACL("Preparing indirect buffer");
133 
134         CLAuxTensorHandler indirect_buffer(_indirect_buffer, *indirect_buffer_aux);
135         ARM_COMPUTE_ERROR_ON(indirect_buffer.get()->cl_buffer().get() == nullptr);
136 
137         ITensorPack indirect_buffer_pack{ { ACL_DST, indirect_buffer.get() } };
138         CLScheduler::get().enqueue_op(*_addr_precalculation_kernel, indirect_buffer_pack, true);
139 
140         _is_prepared = true;
141     }
142 }
143 
// Expose the operator's auxiliary memory requirements (the persistent indirect
// buffer registered in configure()) so the caller can allocate the workspace.
experimental::MemoryRequirements ClIndirectConv2d::workspace() const
{
    return _aux_mem;
}
148 } // namespace opencl
149 } // namespace arm_compute
150