/*
 * Copyright (c) 2018-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClWinogradInputTransformKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/CLKernelLibrary.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/CL/OpenCL.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16);
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);

    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        kernel_size      = winograd_info.kernel_size;
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(conv_info.stride().first != 1 || conv_info.stride().second != 1, "Winograd input transform only supports unit strides");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!cl_winograd_convolution_layer_supported(output_tile_size, kernel_size, input->data_layout()), "Winograd input transform not supported");

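    // Keep the compiler quiet in build configurations where the checks above compile to nothing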
    ARM_COMPUTE_UNUSED(conv_info);
    ARM_COMPUTE_UNUSED(output_tile_size);
    ARM_COMPUTE_UNUSED(kernel_size);

    // Validate configured output
    if(output->total_size() != 0)
    {
        const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*input, winograd_info);

        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output->tensor_shape(), output_shape);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
    }

    return Status{};
}

std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_UNUSED(output);
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);

    bool   window_changed = false;
    Window win            = calculate_max_window(*input, Steps(1, 1));

    if(input->data_layout() == DataLayout::NCHW)
    {
        const PadStrideInfo conv_info        = winograd_info.convolution_info;
        const Size2D        output_tile_size = winograd_info.output_tile_size;
        const Size2D        kernel_size      = winograd_info.kernel_size;

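        // Each work-item reads one input tile of (output_tile_size + kernel_size - 1) elements per dimension, shifted by the left/top padding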
        unsigned int num_elems_read_per_iteration_x = output_tile_size.width + kernel_size.width - 1;
        unsigned int num_elems_read_per_iteration_y = output_tile_size.height + kernel_size.height - 1;

        AccessWindowRectangle input_access(input, -conv_info.pad_left(), -conv_info.pad_top(), num_elems_read_per_iteration_x, num_elems_read_per_iteration_y);
        window_changed = update_window_and_padding(win, input_access);
    }

    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
    return std::make_pair(err, win);
}
} // namespace

ClWinogradInputTransformKernel::ClWinogradInputTransformKernel()
{
    _type = CLKernelType::WINOGRAD;
}

BorderSize ClWinogradInputTransformKernel::border_size() const
{
    return _border_size;
}

void ClWinogradInputTransformKernel::configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(src, dst, winograd_info));

    auto padding_info = get_padding_info({ src, dst });

    const PadStrideInfo conv_info        = winograd_info.convolution_info;
    const Size2D        output_tile_size = winograd_info.output_tile_size;
    const Size2D        kernel_size      = winograd_info.kernel_size;

    _data_layout = src->data_layout();

    const size_t idx_w = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);

    // Compute the number of output tiles of size "output_tile_size" along the x and y directions
    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(src->dimension(idx_w), src->dimension(idx_h)),
                                                                kernel_size,
                                                                output_tile_size,
                                                                conv_info);

    _num_tiles_x = num_tiles.width;
    _num_tiles_y = num_tiles.height;

    const TensorShape output_shape = misc::shape_calculator::compute_winograd_input_transform_shape(*src, winograd_info);

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*dst, src->clone()->set_tensor_shape(output_shape));

    ARM_COMPUTE_ERROR_ON(_num_tiles_x * _num_tiles_y != static_cast<int>(dst->dimension(1)));
    const size_t total_batches = src->tensor_shape().total_size_upper(3);

    CLBuildOptions build_opts;
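    // The NHWC variant receives the tile counts as runtime kernel arguments (see run_op), whereas NCHW bakes NUM_TILES_X into the build options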
    if(_data_layout == DataLayout::NHWC)
    {
        build_opts.add_option("-DNHWC");
        _src_width  = src->dimension(idx_w);
        _src_height = src->dimension(idx_h);
        build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
        build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
        build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
        build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
        build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
        build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
        build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
    }
    else
    {
        build_opts.add_option("-DNUM_TILES_X=" + support::cpp11::to_string(_num_tiles_x));
        build_opts.add_option("-DPAD_LEFT=" + support::cpp11::to_string(conv_info.pad_left()));
        build_opts.add_option("-DPAD_TOP=" + support::cpp11::to_string(conv_info.pad_top()));
        build_opts.add_option("-DOUTPUT_TILE_W=" + support::cpp11::to_string(output_tile_size.width));
        build_opts.add_option("-DOUTPUT_TILE_H=" + support::cpp11::to_string(output_tile_size.height));
        build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(src->data_type()));
        build_opts.add_option_if(winograd_info.kernel_size.height == 1, "-DWINOGRAD_INPUT_TRANSFORM_HORIZONTAL");
        build_opts.add_option_if(winograd_info.kernel_size.width == 1, "-DWINOGRAD_INPUT_TRANSFORM_VERTICAL");
        build_opts.add_option_if(total_batches > 1, "-DSRC_DEPTH=" + support::cpp11::to_string(src->dimension(2)));
    }

    // Create kernel
    std::string kernel_name = "winograd_input_transform_" + output_tile_size.to_string() + "_" + kernel_size.to_string();

    // Get the maximum dimension from the tile size
    const unsigned int tile_max_dim = std::max(output_tile_size.width, output_tile_size.height);

    // For 2x2 output tiles with NCHW data, use the optimized kernel variant that steps over two input planes per work-item when the input depth is even
    if((tile_max_dim == 2) && (_data_layout == DataLayout::NCHW))
    {
        _step_z = (src->dimension(2) % 2) != 0 ? 1 : 2;
    }

    // Append stepz and data layout
    kernel_name += "_stepz";
    kernel_name += support::cpp11::to_string(_step_z);
    kernel_name += "_" + lower_string(string_from_data_layout(_data_layout));

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Create window and update padding
    auto win_config = validate_and_configure_window(src, dst, winograd_info);
    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
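    // Pass a local work-group size hint of 8 along the Z dimension when configuring the window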
    IClKernel::configure_internal(win_config.second, cl::NDRange(1, 1, 8));

    _border_size = BorderSize(src->padding());

    ARM_COMPUTE_ERROR_ON((src->data_layout() == DataLayout::NHWC) && has_padding_changed(padding_info));

    _config_id = kernel_name;
    _config_id += support::cpp11::to_string(src->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(src->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(src->dimension(2));
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_info.pad_left());
    _config_id += "_";
    _config_id += support::cpp11::to_string(conv_info.pad_top());
    _config_id += "_";
    _config_id += lower_string(string_from_data_layout(_data_layout));
}

Status ClWinogradInputTransformKernel::validate(const ITensorInfo *src, const ITensorInfo *dst, const WinogradInfo &winograd_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(src, dst, winograd_info));
    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(src->clone().get(), dst->clone().get(), winograd_info).first);
    return Status{};
}

void ClWinogradInputTransformKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    const size_t idx_w         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_c         = get_data_layout_dimension_index(_data_layout, DataLayoutDimension::CHANNEL);
    const size_t total_batches = window.shape().total_size_upper(3);

    // Collapse window
    Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);

    if(_data_layout == DataLayout::NHWC)
    {
        Window slice = window_collapsed.first_slice_window_3D();
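        // Dimension 1 iterates over all output tiles, dimension 2 over the batches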
        slice.set(1, Window::Dimension(0, _num_tiles_x * _num_tiles_y, 1));
        slice.set(2, Window::Dimension(0, total_batches, 1));

        unsigned int idx = 0;
        add_4D_tensor_argument(idx, src, slice);
        add_4D_tensor_argument(idx, dst, slice);
        _kernel.setArg<cl_uint>(idx++, _src_width);
        _kernel.setArg<cl_uint>(idx++, _src_height);
        _kernel.setArg<cl_uint>(idx++, _num_tiles_x);
        _kernel.setArg<cl_uint>(idx++, _num_tiles_y);
        enqueue(queue, *this, slice, lws_hint());
    }
    else
    {
        Window slice = window_collapsed.first_slice_window_3D();
        slice.set(idx_w, Window::Dimension(0, _num_tiles_x, 1));
        slice.set(idx_h, Window::Dimension(0, _num_tiles_y, 1));

        ARM_COMPUTE_ERROR_ON(((slice[idx_c].end() - slice[idx_c].start()) % _step_z) != 0);
        slice.set(idx_c, Window::Dimension(slice[idx_c].start(), slice[idx_c].end(), _step_z));

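        // Set the batch strides of src and dst once, in the argument slots that follow the two 3D tensor arguments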
        unsigned int idx = 2 * num_arguments_per_3D_tensor();
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(src->info()->strides_in_bytes()[3]));
        _kernel.setArg<cl_uint>(idx++, static_cast<unsigned int>(dst->info()->strides_in_bytes()[3]));

        do
        {
            unsigned int idx = 0;
            add_3D_tensor_argument(idx, src, slice);
            add_3D_tensor_argument(idx, dst, slice);

            enqueue(queue, *this, slice, lws_hint());
        }
        while(window_collapsed.slide_window_slice_3D(slice));
    }
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute