/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/kernels/ClSoftmaxKernel.h"
#include "arm_compute/core/CL/ICLTensor.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/experimental/Types.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "support/Cast.h"
#include "support/StringSupport.h"

namespace arm_compute
{
namespace opencl
{
namespace kernels
{
namespace
{
/** Calculates softmax parameters from the quantized input scale and the exponent scaling factor, and returns them as build options.
 *
 * Prepares these build options:
 * - INPUT_BETA_MULTIPLIER, INPUT_BETA_LEFT_SHIFT - quantized representation of the beta multiplier.
 * - DIFF_MIN - threshold difference between the maximum value of the input data and the currently processed value,
 *              which determines whether the value is taken into account or not.
 *
 * @param[in] input_scale Input scaling factor
 * @param[in] beta        Exponent scaling factor beta
 *
 * @return The build options populated with the softmax parameters
 */
CLBuildOptions prepare_quantized_softmax_build_options(float input_scale, float beta)
{
    // Number of integer bits in temporary fixed-point representation of current-to-max difference
    static const int scaled_diff_int_bits = 5;
    // Number of integer bits used in temporary fixed-point representation of exponent accumulator
    static const int exp_accumulation_in_bits = 12;

    const double beta_multiplier = std::min(
                                       1.0 * beta * input_scale * (1 << (31 - scaled_diff_int_bits)),
                                       (1LL << 31) - 1.0);
    int input_beta_multiplier;
    int input_beta_left_shift;
    quantization::calculate_quantized_multiplier_greater_than_one(beta_multiplier, &input_beta_multiplier, &input_beta_left_shift);

    const double max_input_rescaled = 1.0 * ((1 << scaled_diff_int_bits) - 1) * (1LL << (31 - scaled_diff_int_bits)) / (1LL << input_beta_left_shift);
    const int    diff_min           = -1.f * std::floor(max_input_rescaled);
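    // Illustrative worked example (assuming input_scale = 1.f / 256.f and beta = 1.f):
    //   beta_multiplier       = (1 / 256) * 2^(31 - 5) = 2^18
    //   input_beta_multiplier = 2^30, input_beta_left_shift = 19  (since 2^30 * 2^19 / 2^31 == 2^18)
    //   max_input_rescaled    = 31 * 2^26 / 2^19 = 3968, hence diff_min = -3968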

    CLBuildOptions build_opts;
    build_opts.add_option("-DSCALED_DIFF_INT_BITS=" + support::cpp11::to_string(scaled_diff_int_bits));
    build_opts.add_option("-DEXP_ACCUMULATION_INT_BITS=" + support::cpp11::to_string(exp_accumulation_in_bits));
    build_opts.add_option("-DINPUT_BETA_MULTIPLIER=" + support::cpp11::to_string(input_beta_multiplier));
    build_opts.add_option("-DINPUT_BETA_LEFT_SHIFT=" + support::cpp11::to_string(input_beta_left_shift));
    build_opts.add_option("-DDIFF_MIN=" + support::cpp11::to_string(diff_min));

    return build_opts;
}

Status validate_arguments_1DMaxShiftExpSum(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &max);

    const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src.data_type());

    // Checks performed when output is configured
    if(dst.total_size() != 0)
    {
        if(is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
    }

    // Checks performed when sum is configured
    if(sum.total_size() != 0)
    {
        if(is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&sum, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&max, &sum);
        }
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&max, &sum);
    }

    return Status{};
}

Status validate_arguments_1DNorm(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(&src);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::S32, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &sum);
    ARM_COMPUTE_RETURN_ERROR_ON(info.is_log && !is_data_type_float(info.input_data_type));

    // Note: output should always have a scale of 1/256 and offset 0
    const QuantizationInfo allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
    const bool             is_quantized_asymmetric   = is_data_type_quantized_asymmetric(info.input_data_type);

    // Checks performed when output is configured
    if(dst.total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
        if(!is_quantized_asymmetric)
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &dst);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&dst, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
            ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() != allowed_quantization_info);
        }
    }

    return Status{};
}
} // namespace

/**< Grid size (obtained through auto-tuning) */
const unsigned int ClLogits1DMaxShiftExpSumKernel::_grid_size = 64;
/**< Vector size in the serial case (obtained through auto-tuning) */
const unsigned int ClLogits1DMaxShiftExpSumKernel::_serial_vector_size = 8;
/**< Vector size in the parallel case (obtained through auto-tuning, enables the best memory access pattern for Bifrost). */
const unsigned int ClLogits1DMaxShiftExpSumKernel::_parallel_vector_size = 4;

ClLogits1DMaxShiftExpSumKernel::ClLogits1DMaxShiftExpSumKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}

void ClLogits1DMaxShiftExpSumKernel::configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &max, ITensorInfo &dst, ITensorInfo &sum, const SoftmaxKernelInfo &info)
{
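    // First stage of the softmax computation: this kernel finds the per-row maximum,
    // exponentiates the shifted values (x - max) and accumulates their sum. The
    // companion ClLogits1DNormKernel then normalizes the result by that sum.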
    auto padding_info = get_padding_info({ &src, &max, &dst, &sum });

    // Output auto initialization if not yet initialized
    auto_init_if_empty(sum, src.clone()->set_tensor_shape(max.tensor_shape()));
    auto_init_if_empty(dst, *src.clone());

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));

    const DataType                dt                 = src.data_type();
    const UniformQuantizationInfo qinfo              = src.quantization_info().uniform();
    const size_t                  reduction_dim_size = src.dimension(0);
    const float                   beta               = info.beta;
    const auto                    is_signed_qasymm8  = is_data_type_quantized_asymmetric_signed(info.input_data_type);
    const int                     min_value          = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;

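    // Cap the vector width so it does not exceed the size of the reduction dimension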
    const unsigned int vector_size = adjust_vec_size(_serial_vector_size, reduction_dim_size);

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(dt));
    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
    build_opts.add_option("-DSRC_WIDTH=" + support::cpp11::to_string(reduction_dim_size));
    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(reduction_dim_size % vector_size));
    build_opts.add_option("-DLOG_VECTOR_SIZE=" + support::cpp11::to_string(lround(log2(vector_size))));
    build_opts.add_option_if((reduction_dim_size % vector_size) != 0, "-DNON_MULTIPLE_OF_VECTOR_SIZE");
    build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
    build_opts.add_option_if(is_data_type_float(dt) && (beta != 1.0f), "-DBETA=" + float_to_string_with_full_precision(beta));
    build_opts.add_option_if(is_data_type_float(dt) && info.is_log, "-DLOG_SOFTMAX");
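    // MINVAL is the lowest representable value of the float type, presumably used by the
    // kernel as the initial value of the running-maximum reduction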
    build_opts.add_option_if(is_data_type_float(dt), "-DMINVAL=" + ((dt == DataType::F16) ? std::string("-HALF_MAX") : std::string("-FLT_MAX")));
    build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
    build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DBETA=" + float_to_string_with_full_precision(beta));
    build_opts.add_options_if(is_data_type_quantized_asymmetric(dt), prepare_quantized_softmax_build_options(qinfo.scale, beta).options());

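    // A null LWS hint lets the OpenCL runtime choose the local work-group size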
    cl::NDRange lws_hint(cl::NullRange);
    std::string kernel_name = std::string("softmax_layer_max_shift_exp_sum_") + (is_data_type_quantized_asymmetric(dt) ? "quantized_" : "") + "serial";

    // Create kernel
    _kernel = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure window
    Window win = calculate_max_window(src, Steps(reduction_dim_size));
    IClKernel::configure_internal(win, lws_hint);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status ClLogits1DMaxShiftExpSumKernel::validate(const ITensorInfo &src, const ITensorInfo &max, const ITensorInfo &dst, const ITensorInfo &sum)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DMaxShiftExpSum(src, max, dst, sum));
    return Status{};
}

ClLogits1DMaxShiftExpSumKernel::ParallelReductionInfo ClLogits1DMaxShiftExpSumKernel::is_parallel_reduction(size_t size)
{
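    // With the tuned constants above (_grid_size = 64, _serial_vector_size = 8), rows of
    // at least 512 elements are considered worth reducing in parallel across the grid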
    bool         is_parallel_reduction = (size >= (_grid_size * _serial_vector_size)) && (_grid_size > 1);
    unsigned int vector_size           = is_parallel_reduction ? _parallel_vector_size : _serial_vector_size;
    return std::make_tuple(is_parallel_reduction, vector_size);
}

void ClLogits1DMaxShiftExpSumKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    auto max = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));
    auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_1));

    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, max, sum);

    // Collapse window in Z dimension
    Window window_collapsed = window.collapse_if_possible(IClKernel::window(), Window::DimZ);

    // Reconfigure window in case of parallel reduction
    ParallelReductionInfo parallel_reduction_info = is_parallel_reduction(src->info()->dimension(0));
    if(std::get<0>(parallel_reduction_info))
    {
        // Launch grid_size parallel work items
        window_collapsed.set(Window::DimX, Window::Dimension(0, _grid_size, 1));
    }

    // Get slices
    Window slice = window_collapsed.first_slice_window_3D();
    do
    {
        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, src, slice);
        add_3D_tensor_argument(idx, max, slice);
        add_3D_tensor_argument(idx, dst, slice);
        add_3D_tensor_argument(idx, sum, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}

ClLogits1DNormKernel::ClLogits1DNormKernel()
{
    _type = CLKernelType::ELEMENTWISE;
}

void ClLogits1DNormKernel::configure(const CLCompileContext &compile_context, const ITensorInfo &src, const ITensorInfo &sum, ITensorInfo &dst, const SoftmaxKernelInfo &info)
{
    auto padding_info = get_padding_info({ &src, &dst, &sum });

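    // Second stage of the softmax computation: this kernel normalizes each exponentiated
    // value by the accumulated row sum (for log-softmax, the log-sum is subtracted instead)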
    // Note: output should always have a scale of 1/256 and offset 0
    const bool                    is_quantized_asymmetric   = is_data_type_quantized_asymmetric(info.input_data_type);
    const DataType                output_data_type          = info.input_data_type;
    const QuantizationInfo        allowed_quantization_info = get_softmax_output_quantization_info(info.input_data_type, info.is_log);
    const UniformQuantizationInfo qinfo                     = src.quantization_info().uniform();

    // Output auto initialization if not yet initialized
    auto_init_if_empty(dst, src.clone()->set_data_type(output_data_type).set_quantization_info(allowed_quantization_info));

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_1DNorm(src, sum, dst, info));

    const auto         is_signed_qasymm8 = is_data_type_quantized_asymmetric_signed(info.input_data_type);
    const int          min_value         = is_signed_qasymm8 ? CL_SCHAR_MIN : 0;
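    // Preferred vector width of 16, reduced when the row is narrower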
    const unsigned int vector_size       = adjust_vec_size(16, src.dimension(0));

    // Set build options
    CLBuildOptions build_opts;
    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(info.input_data_type));
    build_opts.add_option("-DMIN_VALUE=" + support::cpp11::to_string(min_value));
    build_opts.add_option("-DVECTOR_SIZE=" + support::cpp11::to_string(vector_size));
    build_opts.add_option("-DVECTOR_SIZE_LEFTOVER=" + support::cpp11::to_string(src.dimension(0) % vector_size));
    build_opts.add_option_if(is_signed_qasymm8, "-DQASYMM8_SIGNED");
    build_opts.add_options_if(is_quantized_asymmetric,
                              prepare_quantized_softmax_build_options(qinfo.scale, info.beta).options());
    build_opts.add_option_if(info.is_log, "-DLOG_SOFTMAX");
    build_opts.add_option_if(is_quantized_asymmetric, "-DSCALE=" + float_to_string_with_full_precision(qinfo.scale));
    build_opts.add_option_if(is_quantized_asymmetric, "-DBETA=" + float_to_string_with_full_precision(info.beta));

    // Create kernel
    std::string kernel_name = std::string("softmax_layer_norm") + (is_quantized_asymmetric ? "_quantized" : "");
    _kernel                 = create_kernel(compile_context, kernel_name, build_opts.options());

    // Configure window
    auto win = calculate_max_window(src, Steps(vector_size));
    ICLKernel::configure_internal(win);

    ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
}

Status ClLogits1DNormKernel::validate(const ITensorInfo &src, const ITensorInfo &sum, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_1DNorm(src, sum, dst, info));

    return Status{};
}

void ClLogits1DNormKernel::run_op(ITensorPack &tensors, const Window &window, ::cl::CommandQueue &queue)
{
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);

    auto src = utils::cast::polymorphic_downcast<const ICLTensor *>(tensors.get_const_tensor(TensorType::ACL_SRC));
    auto dst = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_DST));
    auto sum = utils::cast::polymorphic_downcast<ICLTensor *>(tensors.get_tensor(TensorType::ACL_INT_0));

    ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst, sum);

    Window window_collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
    Window slice            = window_collapsed.first_slice_window_3D();

    do
    {
        Window sum_slice = slice;
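        // The sum tensor holds a single accumulated value per row, so pin its X dimension to one step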
        sum_slice.set(Window::DimX, Window::Dimension(0, 1, 1));

        unsigned int idx = 0;
        // Set inputs
        add_3D_tensor_argument(idx, src, slice);
        add_3D_tensor_argument(idx, sum, sum_slice);
        add_3D_tensor_argument(idx, dst, slice);
        enqueue(queue, *this, slice, lws_hint());
    }
    while(window_collapsed.slide_window_slice_3D(slice));
}
} // namespace kernels
} // namespace opencl
} // namespace arm_compute