/*
 * Copyright (c) 2018-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClGemmLowpOffsetContributionOutputStageKernel.h"

#include "arm_compute/core/CL/CLHelpers.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"

#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
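// Validates the data types, shapes and output stage settings of every kernel argument,
// returning an error Status on the first violated condition.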
Status validate_arguments(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, const ITensorInfo *dst,
                          int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage, const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(mm_result, 1, DataType::S32);

    if(bias != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > 1);
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != bias->dimension(0));
    }

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_multipliers, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_multipliers->num_dimensions() > 1);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output_shifts, 1, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON(output_shifts->num_dimensions() > 1);
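    // For per-channel quantization, one multiplier and one shift are expected per output channel (dimension 0)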
    if(output_stage.is_quantized_per_channel)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_shifts->dimension(0));
        ARM_COMPUTE_RETURN_ERROR_ON(mm_result->dimension(0) != output_multipliers->dimension(0));
    }

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_col, 1, DataType::S32);
        ARM_COMPUTE_RETURN_ERROR_ON(vector_sum_col->dimension(0) != mm_result->dimension(0));
    }

    // If b_offset == 0, vector_sum_row can be a nullptr
    if(b_offset != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(vector_sum_row, 1, DataType::S32);

        // Check if input is a 3D reinterpretation
        const bool reinterpret_as_3d = mm_result->num_dimensions() > 1 && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();

        // Validate input
        ARM_COMPUTE_RETURN_ERROR_ON(reinterpret_as_3d && vector_sum_row->dimension(0) != (mm_result->dimension(1) * mm_result->dimension(2)));
        ARM_COMPUTE_RETURN_ERROR_ON(!reinterpret_as_3d && vector_sum_row->dimension(0) != mm_result->dimension(1));

        TensorShape output_shape = mm_result->tensor_shape();
        if(output_shape.num_dimensions() > 1)
        {
            const unsigned int output_batch_idx = reinterpret_as_3d ? 3 : 2;

            TensorShape vector_sum_row_shape = vector_sum_row->tensor_shape();
            vector_sum_row_shape.collapse_from(1);
            output_shape.collapse_from(output_batch_idx);

            ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_row_shape[1] != output_shape[output_batch_idx],
                                            "mm_result tensor must have the same number of batches as the output tensor");

            if(a_offset != 0)
            {
                TensorShape vector_sum_col_shape = vector_sum_col->tensor_shape();
                vector_sum_col_shape.collapse_from(1);

                ARM_COMPUTE_RETURN_ERROR_ON_MSG(vector_sum_col_shape[1] != 1 && vector_sum_col_shape[1] != vector_sum_row_shape[1],
                                                "vector_sum_col tensor must have the same number of batches as vector_sum_row, or a batch count of 1");
            }
        }
    }

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.type == GEMMLowpOutputStageType::NONE);
    // Checks performed when output is configured
    if((dst != nullptr) && (dst->total_size() != 0))
    {
        ARM_COMPUTE_RETURN_ERROR_ON(output_stage.output_data_type != dst->data_type());
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mm_result, dst);
    }

    ARM_COMPUTE_RETURN_ERROR_ON(output_stage.gemmlowp_min_bound > output_stage.gemmlowp_max_bound);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(output_stage.gemmlowp_multipliers.size() != output_stage.gemmlowp_shifts.size(), "per channel quantization info is incorrect");

    return Status{};
}
} // namespace

ClGemmLowpOffsetContributionOutputStageKernel::ClGemmLowpOffsetContributionOutputStageKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}

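// Configures the kernel: validates the arguments, auto-initializes the destination if needed,
// assembles the compile-time build options, creates the OpenCL kernel and sets up the
// execution window and the tuning config_id.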
void ClGemmLowpOffsetContributionOutputStageKernel::configure(const CLCompileContext &compile_context,
                                                              const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias, ITensorInfo *dst,
                                                              int32_t k, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                              const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(mm_result, dst, output_multipliers, output_shifts);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, dst, a_offset, b_offset, output_stage, output_multipliers, output_shifts));

    auto padding_info = get_padding_info({ mm_result, vector_sum_col, vector_sum_row, bias, dst, output_multipliers, output_shifts });

    const int min = output_stage.gemmlowp_min_bound;
    const int max = output_stage.gemmlowp_max_bound;

    _is_quantized_per_channel = output_stage.is_quantized_per_channel;

    // Check if input is a 3D reinterpretation
    const bool reinterpret_as_3d = vector_sum_row != nullptr
                                   && mm_result->num_dimensions() > 1
                                   && mm_result->tensor_shape().y() != vector_sum_row->tensor_shape().x();
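    // When reinterpreted as 3D, vector_sum_row holds one sum per row of the collapsed
    // height * depth plane (see validate_arguments), hence the mismatch with mm_result's Y dimension.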

    // Auto initialize the output
    auto_init_if_empty(*dst, mm_result->clone()->set_data_type(output_stage.output_data_type));

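    // Process up to 4 elements per work item; adjust_vec_size() shrinks the vector width
    // when the row length (dimension 0) is smaller than that.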
    const unsigned int num_elems_processed_per_iteration = adjust_vec_size(4, mm_result->dimension(0));

    // Set the arguments to pass at compile time
    CLBuildOptions build_opts;
    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
    build_opts.add_option("-DVEC_SIZE_LEFTOVER=" + support::cpp11::to_string(mm_result->dimension(0) % num_elems_processed_per_iteration));

    // If a_offset == 0, vector_sum_col can be a nullptr
    if(a_offset != 0)
    {
        build_opts.add_option("-DA_OFFSET=" + support::cpp11::to_string(a_offset));
        build_opts.add_option_if(vector_sum_col->tensor_shape().num_dimensions() > 1, "-DSUM_COL_HAS_BATCHES");
    }
    // If b_offset == 0, vector_sum_row can be a nullptr
    build_opts.add_option_if(b_offset != 0, "-DB_OFFSET=" + support::cpp11::to_string(b_offset));
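    // The kernel adds the full offset contribution to each element of mm_result:
    //   mm_result[x, y] += vector_sum_col[x] * A_OFFSET + vector_sum_row[y] * B_OFFSET + K_OFFSET
    // where K_OFFSET = A_OFFSET * B_OFFSET * k is the constant term that arises from expanding
    // the product of the zero-point-corrected operands (k = number of matrix A columns).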
171 build_opts.add_option("-DK_OFFSET=" + support::cpp11::to_string(a_offset * b_offset * k));
172 build_opts.add_option_if(reinterpret_as_3d, "-DHEIGHT_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(1)));
173 build_opts.add_option_if(reinterpret_as_3d, "-DDEPTH_INPUT3D=" + support::cpp11::to_string(mm_result->dimension(2)));
174 build_opts.add_option_if(bias != nullptr, "-DADD_BIAS");
175 build_opts.add_option("-DRESULT_OFFSET=" + support::cpp11::to_string(output_stage.gemmlowp_offset));
176 build_opts.add_option("-DRESULT_MULTIPLIER=" + support::cpp11::to_string(output_stage.gemmlowp_multipliers[0]));
177 build_opts.add_option("-DRESULT_SHIFT=" + support::cpp11::to_string(output_stage.gemmlowp_shifts[0]));
178 build_opts.add_option_if(_is_quantized_per_channel, "-DPER_CHANNEL_QUANTIZATION");
179 build_opts.add_option("-DOUTPUT_DATA_TYPE=" + get_cl_type_from_data_type(dst->data_type()));
180
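    // Emit the clamp bounds only when they are tighter than the natural range of the output
    // data type, so the compiled kernel can skip the clamping entirely in the common case.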
    PixelValue min_val{};
    PixelValue max_val{};
    std::tie(min_val, max_val) = get_min_max(dst->data_type());
    build_opts.add_option_if((min > min_val.get<int32_t>()), "-DMIN_BOUND=" + support::cpp11::to_string(min));
    build_opts.add_option_if((max < max_val.get<int32_t>()), "-DMAX_BOUND=" + support::cpp11::to_string(max));

    std::string kernel_name("gemmlowp_offset_contribution");
    kernel_name += "_" + string_from_gemmlowp_output_stage(output_stage.type);
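    // e.g. "gemmlowp_offset_contribution_quantize_down_fixedpoint" for the QUANTIZE_DOWN_FIXEDPOINT stage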

    // A macro guard to compile ONLY the kernel of interest
    build_opts.add_option("-D" + upper_string(kernel_name));

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure kernel window
    Window win = calculate_max_window(*mm_result, Steps(num_elems_processed_per_iteration));
    ICLKernel::configure_internal(win);

    // Set config_id for enabling LWS tuning
    _config_id = kernel_name + "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(0));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(1));
    _config_id += "_";
    _config_id += support::cpp11::to_string(mm_result->dimension(2));

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status ClGemmLowpOffsetContributionOutputStageKernel::validate(const ITensorInfo *mm_result, const ITensorInfo *vector_sum_col, const ITensorInfo *vector_sum_row, const ITensorInfo *bias,
                                                               const ITensorInfo *dst, int32_t a_offset, int32_t b_offset, const GEMMLowpOutputStageInfo &output_stage,
                                                               const ITensorInfo *output_multipliers, const ITensorInfo *output_shifts)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(mm_result, vector_sum_col, vector_sum_row, bias, dst, a_offset, b_offset, output_stage, output_multipliers, output_shifts));
    return Status{};
}

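// Runs the kernel: binds the tensors from the pack as OpenCL kernel arguments and enqueues
// one workload per 3D slice of the execution window.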
void ClGemmLowpOffsetContributionOutputStageKernel::run_op(ITensorPack &tensors, const Window &window, cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);

    const auto mm_result          = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    const auto bias               = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_BIAS));
    const auto vector_sum_col     = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_COL_SUM));
    const auto vector_sum_row     = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_VEC_ROW_SUM));
    const auto output_shifts      = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SHIFTS));
    const auto output_multipliers = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_MULTIPLIERS));
    auto       dst                = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));

    Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice     = collapsed.first_slice_window_3D();
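    // The auxiliary sum vectors do not span the dimensions of the full output window, so the
    // dimensions they lack are zeroed below to keep the per-slice argument offsets valid.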

    // Set window for vector_sum_col
    Window win_vector_sum_col = slice;
    win_vector_sum_col.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_col.set(Window::DimZ, Window::Dimension(0, 0, 0));

    // Set window for vector_sum_row
    Window win_vector_sum_row = slice;
    win_vector_sum_row.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimY, Window::Dimension(0, 0, 0));
    win_vector_sum_row.set(Window::DimZ, Window::Dimension(0, 0, 0));

    Window biases_slice = slice;
    biases_slice.set(Window::DimY, Window::Dimension(0, 1, 1));
    biases_slice.set(Window::DimZ, Window::Dimension(0, 1, 1));
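    // The same single-row slice is reused for the bias vector and, in the per-channel case,
    // for the output multipliers and shifts; all three are 1D tensors.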

    do
    {
        unsigned int idx = 0;
        add_3D_tensor_argument(idx, mm_result, slice);
        add_2D_tensor_argument_if((vector_sum_col != nullptr), idx, vector_sum_col, win_vector_sum_col);
        add_2D_tensor_argument_if((vector_sum_row != nullptr), idx, vector_sum_row, win_vector_sum_row);
        add_1D_tensor_argument_if((bias != nullptr), idx, bias, biases_slice);
        add_3D_tensor_argument(idx, dst, slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, output_multipliers, biases_slice);
        add_1D_tensor_argument_if(_is_quantized_per_channel, idx, output_shifts, biases_slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(collapsed.slide_window_slice_3D(slice));
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute