/*
 * Copyright (c) 2020-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/CL/CLCompileContext.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "support/StringSupport.h"

#include "src/core/CL/CLUtils.h"
#include "src/core/experimental/PostOpUtils.h"

#include <cstring>

namespace arm_compute
{
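/** Create a cl::Image2D object that aliases an existing OpenCL buffer.
 *
 * The image is created with a CL_RGBA channel order and a CL_FLOAT or CL_HALF_FLOAT channel
 * type, depending on @p data_type (only F32 and F16 are supported).
 *
 * @param[in] ctx             OpenCL context the image is created in.
 * @param[in] buffer          OpenCL buffer to wrap.
 * @param[in] shape2d         2D shape; index 0 is used as the image width and index 1 as the image height.
 * @param[in] data_type       Data type of the buffer elements (F32 or F16).
 * @param[in] image_row_pitch Row pitch of the image, in bytes.
 * @param[in] type            Whether the image is created read-only or write-only.
 *
 * @return A cl::Image2D backed by @p buffer.
 */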
cl::Image2D create_image2d_from_buffer(const cl::Context &ctx, const cl::Buffer &buffer, const TensorShape &shape2d, DataType data_type, size_t image_row_pitch, CLImage2DType type)
{
    cl_channel_type cl_data_type;

    switch(data_type)
    {
        case DataType::F32:
            cl_data_type = CL_FLOAT;
            break;
        case DataType::F16:
            cl_data_type = CL_HALF_FLOAT;
            break;
        default:
            ARM_COMPUTE_ERROR("Data type not supported with OpenCL image2d");
    }

    cl_mem cl_image;
    cl_int err = CL_SUCCESS;

    const cl_image_format format = { CL_RGBA, cl_data_type };

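    // Describe a 2D image that aliases the given buffer (desc.mem_object). Note that creating an
    // image from a buffer typically requires the cl_khr_image2d_from_buffer capability and a row
    // pitch that satisfies the device's image pitch alignment.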
    cl_image_desc desc;
    memset(&desc, 0, sizeof(desc));
    desc.image_type      = CL_MEM_OBJECT_IMAGE2D;
    desc.mem_object      = buffer();
    desc.image_row_pitch = image_row_pitch;
    desc.image_width     = shape2d[0];
    desc.image_height    = shape2d[1];

    switch(type)
    {
        case CLImage2DType::ReadOnly:
            cl_image = clCreateImage(ctx(), CL_MEM_READ_ONLY, &format, &desc, nullptr, &err);
            break;
        case CLImage2DType::WriteOnly:
            cl_image = clCreateImage(ctx(), CL_MEM_WRITE_ONLY, &format, &desc, nullptr, &err);
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported CLImage2DType");
    }

    ARM_COMPUTE_UNUSED(err);
    ARM_COMPUTE_ERROR_ON_MSG(err != CL_SUCCESS, "Error during the creation of CL image from buffer");

    return cl::Image2D(cl_image);
}

namespace experimental
{
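/** Construct the utility object from a map of supported PostOp configurations.
 *
 * Each entry maps a PostOp type sequence to its kernel name postfix and the slots the PostOps
 * are assigned to. The number of PostOps in each sequence must match the number of assigned slots.
 */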
PostOpCLKernelUtils::PostOpCLKernelUtils(const Config &supported_config)
    : _supported_config(supported_config)
{
    ARM_COMPUTE_ERROR_ON_MSG(supported_config.empty(), "Empty PostOp CL kernel support configuration is not allowed");
    for(auto it = _supported_config.begin(); it != _supported_config.end(); ++it)
    {
        auto post_op_sequence = it->first;
        auto post_op_slots    = std::get<1>(it->second);
        ARM_COMPUTE_ERROR_ON_MSG(post_op_sequence.size() != post_op_slots.size(), "The number of PostOps must be the same as that of the assigned slots");
    }
}

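/** Check that every PostOp argument can be broadcast against the destination tensor without
 * altering the destination shape.
 *
 * Also rejects the unsupported broadcast pattern where only the first dimension of a PostOp
 * argument is broadcast (argument shape: first dim = 1, second dim > 1, against a destination
 * whose first and second dims are both greater than 1).
 */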
bool PostOpCLKernelUtils::are_post_op_shapes_compliant(const ITensorInfo *dst, const experimental::PostOpList<ITensorInfo *> &post_ops)
{
    for(const auto &op : post_ops.get_list())
    {
        for(const auto &tensor : op->arguments())
        {
            const TensorShape &out_shape = TensorShape::broadcast_shape(dst->tensor_shape(), (*tensor)->tensor_shape());
            // All post ops must be elementwise and must not alter the shape of the original dst tensor after broadcasting
            if(detail::have_different_dimensions(out_shape, dst->tensor_shape(), 0))
            {
                return false;
            }
            // NOTE: Kernel limitation: currently only the following broadcasting types are supported:
            //  1. Post op arg is a scalar, broadcast in both the first and second dims
            //  2. Post op arg is of shape: second dim=1, first dim=N, broadcast only in the second dim
            //  The remaining case (post op arg of shape: second dim=M, first dim=1, broadcast only in the first dim) is NOT supported
            if(dst->dimension(0) > 1 && dst->dimension(1) > 1 && (*tensor)->dimension(0) == 1 && (*tensor)->dimension(1) > 1)
            {
                return false;
            }
        }
    }
    return true;
}

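/** Check whether the PostOp sequence derived from @p post_ops matches one of the supported
 * configurations. An empty PostOp list is always supported.
 */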
bool PostOpCLKernelUtils::is_post_op_sequence_supported(const PostOpList<ITensorInfo *> &post_ops) const
{
    if(post_ops.size() == 0)
    {
        return true; // Always support cases where no post op is specified
    }
    const auto post_op_sequence = get_post_op_sequence(post_ops);

    return _supported_config.find(post_op_sequence) != _supported_config.end();
}

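/** Append the OpenCL build options required by the given PostOp list to @p build_opts.
 *
 * Each PostOp contributes defines prefixed by its configured slot, e.g. (illustrative, for an
 * Activation PostOp assigned to slot 2): -DP2_ACTIVATION_TYPE=..., -DP2_ACTIVATION_A_VAL=...,
 * -DP2_ACTIVATION_B_VAL=... Eltwise Add/PRelu PostOps additionally emit per-argument
 * _ELTWISE_ARGn_HEIGHT / _ELTWISE_ARGn_WIDTH defines.
 */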
void PostOpCLKernelUtils::set_post_ops_cl_build_options(CLBuildOptions &build_opts, const PostOpList<ITensorInfo *> &post_ops) const
{
    const auto post_op_sequence = get_post_op_sequence(post_ops);
    const auto slots            = std::get<1>(_supported_config.at(post_op_sequence));
    for(size_t post_op_id = 0; post_op_id < post_ops.size(); ++post_op_id)
    {
        const auto &post_op     = post_ops.get_list().at(post_op_id);
        const auto  slot_prefix = "-DP" + support::cpp11::to_string(slots[post_op_id]);
        if(post_op->type() == experimental::PostOpType::Activation)
        {
            const auto _post_op  = utils::cast::polymorphic_downcast<const experimental::PostOpAct<ITensorInfo *> *>(post_op.get());
            const auto act_type  = slot_prefix + "_ACTIVATION_TYPE=" + lower_string(string_from_activation_func(_post_op->_act_info.activation()));
            const auto act_a_val = slot_prefix + "_ACTIVATION_A_VAL=" + float_to_string_with_full_precision(_post_op->_act_info.a());
            const auto act_b_val = slot_prefix + "_ACTIVATION_B_VAL=" + float_to_string_with_full_precision(_post_op->_act_info.b());
            build_opts.add_option(act_type);
            build_opts.add_option(act_a_val);
            build_opts.add_option(act_b_val);
        }
        else if(post_op->type() == experimental::PostOpType::Eltwise_Add)
        {
            size_t     arg_id     = 1;
            const auto eltwise_op = slot_prefix + "_ELTWISE_OP=ADD" + "_X_POS_" + support::cpp11::to_string(post_op->prev_dst_pos());
            build_opts.add_option(eltwise_op);
            for(const auto &tensor : post_op->arguments())
            {
                const auto height = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_HEIGHT=" + support::cpp11::to_string((*tensor)->dimension(1));
                const auto width  = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_WIDTH=" + support::cpp11::to_string((*tensor)->dimension(0));
                build_opts.add_option(height);
                build_opts.add_option(width);
                ++arg_id;
            }
        }
        else if(post_op->type() == experimental::PostOpType::Eltwise_PRelu)
        {
            size_t     arg_id     = 1;
            const auto eltwise_op = slot_prefix + "_ELTWISE_OP=PRELU" + "_X_POS_" + support::cpp11::to_string(post_op->prev_dst_pos());
            build_opts.add_option(eltwise_op);
            for(const auto &tensor : post_op->arguments())
            {
                const auto height = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_HEIGHT=" + support::cpp11::to_string((*tensor)->dimension(1));
                const auto width  = slot_prefix + "_ELTWISE_ARG" + support::cpp11::to_string(arg_id) + "_WIDTH=" + support::cpp11::to_string((*tensor)->dimension(0));
                build_opts.add_option(height);
                build_opts.add_option(width);
                ++arg_id;
            }
        }
    }
}

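/** Append the kernel name postfix configured for the PostOp sequence of @p post_ops to
 * @p kernel_name.
 */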
void PostOpCLKernelUtils::set_post_ops_cl_kernel_name(std::string &kernel_name, const PostOpList<ITensorInfo *> &post_ops) const
{
    const auto post_op_sequence = get_post_op_sequence(post_ops);
    const auto postfix          = std::get<0>(_supported_config.at(post_op_sequence));
    kernel_name += postfix;
}
} // namespace experimental

} // namespace arm_compute