xref: /aosp_15_r20/external/ComputeLibrary/src/gpu/cl/kernels/ClCopyKernel.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClCopyKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
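// Checks that src and dst are compatible for a copy. If dst is already initialized, its data type and
// quantization info must match src; the source shape is compared against dst_window's shape when a
// destination window is provided, otherwise against the destination tensor shape.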
Status validate_arguments(const ITensorInfo *src, const ITensorInfo *dst, Window *dst_window = nullptr)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);

    // Validate dst if initialized
    if(dst->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, dst);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(src, dst);
        if(dst_window == nullptr)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst->tensor_shape());
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(src->tensor_shape(), dst_window->shape());
        }
    }

    return Status{};
}

} // namespace

ClCopyKernel::ClCopyKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}

void ClCopyKernel::configure(const CLCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst, Window *dst_window)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, dst_window));

    auto padding_info = get_padding_info({ src, dst });
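    // Snapshot the tensors' padding before configuration; it is checked again at the end of
    // configure() to assert that this kernel does not require any extra padding.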

    // Create kernel
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*dst, *src);

    // Configure window
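    // Process up to 16 bytes per work-item along x (4 elements for F32, 8 for F16), reduced so the
    // vector width never exceeds the tensor's x dimension.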
    const unsigned int vec_size_x = adjust_vec_size(16 / src->element_size(), src->dimension(0));

    const Window win_config = calculate_max_window(*src, Steps(vec_size_x));

    if(dst_window != nullptr)
    {
        _has_dst_window                = true;
        _dst_window                    = Window(*dst_window);
        const int  width_x             = dst_window->num_iterations(0);
        const int  vec_size_x_leftover = width_x % vec_size_x;
        const bool multi_access_x      = width_x >= static_cast<int32_t>(vec_size_x);

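        // If at least one full vector fits in the row, round the destination window's x end up to a
        // multiple of vec_size_x; the remaining VEC_SIZE_LEFTOVER elements are left for the OpenCL
        // kernel to handle as a partial access.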
        if(multi_access_x)
        {
            _dst_window.set(Window::DimX, Window::Dimension(dst_window->x().start(), ceil_to_multiple(dst_window->x().end(), vec_size_x), vec_size_x));
        }

        build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftover));
    }
    else
    {
        const int width_x             = src->tensor_shape().x();
        const int vec_size_x_leftover = width_x % vec_size_x;

        build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(vec_size_x_leftover));
    }

    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(vec_size_x));
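    // Illustrative example (not taken from this file): for an F32 tensor with x dimension 18 and no
    // destination window, vec_size_x is 4 (16 bytes / 4 bytes per element) and the kernel is built
    // with -DDATA_TYPE=float -DVEC_SIZE=4 -DVEC_SIZE_LEFTOVER=2.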

    // Build kernel
    _kernel = create_kernel(compile_context, "copy_tensor", build_opts.options());

    // Validate and set the window
    ICLKernel::configure_internal(win_config);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status ClCopyKernel::validate(const arm_compute::ITensorInfo *src, const arm_compute::ITensorInfo *dst, Window *dst_window)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, dst_window));

    return Status{};
}

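// Sketch of a typical call sequence (illustrative only; in practice this kernel is driven by the
// ClCopy operator and the runtime, and the tensor and queue objects below are assumed to exist):
//
//   ClCopyKernel copy;
//   copy.configure(CLKernelLibrary::get().get_compile_context(), src.info(), dst.info());
//   ITensorPack pack{ { TensorType::ACL_SRC, &src }, { TensorType::ACL_DST, &dst } };
//   copy.run_op(pack, copy.window(), CLScheduler::get().queue());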
void ClCopyKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto       dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    Window slice;

    if(_has_dst_window)
    {
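        // Copy into a sub-region of the destination: walk matching 3D slices of the source window
        // and of the destination window captured at configure() time, enqueueing one copy per slice.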
        slice            = window.first_slice_window_3D();
        Window out_slice = _dst_window.first_slice_window_3D();
        do
        {
            unsigned int idx = 0;
            add_3D_tensor_argument(idx, src, slice);
            add_3D_tensor_argument(idx, dst, out_slice);
            enqueue(queue, *this, slice, lws_hint());
        }
        while(window.slide_window_slice_3D(slice) && _dst_window.slide_window_slice_3D(out_slice));
    }
    else
    {
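        // Plain copy: src and dst are traversed with the same window. Collapse dimensions from Z
        // upwards where possible so fewer, larger slices are enqueued.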
        Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
        slice            = collapsed.first_slice_window_3D();
        do
        {
            unsigned int idx = 0;
            add_3D_tensor_argument(idx, src, slice);
            add_3D_tensor_argument(idx, dst, slice);
            enqueue(queue, *this, slice, lws_hint());
        }
        while(collapsed.slide_window_slice_3D(slice));
    }
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute