/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/operators/ClSoftmax.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/core/helpers/SoftmaxHelpers.h"
#include "src/gpu/cl/kernels/ClSoftmaxKernel.h"
#include "src/gpu/cl/operators/ClPermute.h"
#include "src/gpu/cl/utils/ClAuxTensorHandler.h"
#include "support/Cast.h"

#include "src/common/utils/Log.h"

using namespace arm_compute::experimental;

namespace arm_compute
{
namespace opencl
{
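// The operator is built from two kernels (max-shift-exp-sum followed by normalization)
// plus optional input/output permutes for non-innermost axes; the members are
// default-constructed here and wired up in configure().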
ClSoftmax::ClSoftmax()
    : _permute_input(std::make_unique<ClPermute>()),
      _permute_output(std::make_unique<ClPermute>()),
      _max_shift_exp_sum_kernel(std::make_unique<kernels::ClLogits1DMaxShiftExpSumKernel>()),
      _norm_kernel(std::make_unique<kernels::ClLogits1DNormKernel>()),
      _max_info(),
      _sum_info(),
      _tmp_info(),
      _permuted_src_info(),
      _permuted_dst_info(),
      _aux_mem(InternalTensorIdx::COUNT)
{
}

void ClSoftmax::configure(const CLCompileContext &compile_context, const ITensorInfo &src, ITensorInfo &dst, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate(src, dst, info));
    ARM_COMPUTE_LOG_PARAMS(src, dst, info);

    const size_t actual_axis = static_cast<size_t>(wrap_around(info.axis, static_cast<int32_t>(src.num_dimensions())));

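    // The softmax kernels always reduce along the innermost dimension (axis 0), so any
    // other axis is handled by permuting the input before the kernels and permuting the
    // result back afterwards.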
    _needs_permute = actual_axis != 0;

    const ITensorInfo &tmp_input_info  = _needs_permute ? _permuted_src_info : src;
    ITensorInfo       &tmp_output_info = _needs_permute ? _permuted_dst_info : dst;

    if(_needs_permute)
    {
        const auto perm_info = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
        _permute_input->configure(compile_context, &src, &_permuted_src_info, perm_info);
    }

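    // For quantized asymmetric inputs the intermediate exponentials are kept in S32;
    // all other data types reuse the input data type for the temporary tensor.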
    DataType tmp_data_type = is_data_type_quantized_asymmetric(tmp_input_info.data_type()) ? DataType::S32 : tmp_input_info.data_type();
    _tmp_info              = tmp_input_info.clone()->set_data_type(tmp_data_type);

    TensorShape max_sum_shape = tmp_input_info.tensor_shape();
    _max_info                 = tmp_input_info.clone()->set_tensor_shape(max_sum_shape);
    _sum_info                 = tmp_input_info.clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type);

    // Set GPU target to kernels
    _max_shift_exp_sum_kernel->set_target(CLScheduler::get().target());

    _max_shift_exp_sum_kernel->configure(compile_context, tmp_input_info, _max_info, _tmp_info, _sum_info, info);
    _norm_kernel->configure(compile_context, _tmp_info, _sum_info, tmp_output_info, info);

    if(_needs_permute)
    {
        const auto perm_info = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
        _permute_output->configure(compile_context, &_permuted_dst_info, &dst, perm_info);
    }

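    // Report every intermediate tensor as temporary workspace, keyed by its internal
    // tensor index, so the caller can provide the backing memory through the tensor pack.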
    _aux_mem[InternalTensorIdx::SUM] = MemoryInfo(offset_int_vec(InternalTensorIdx::SUM), MemoryLifetime::Temporary, _sum_info.total_size());
    _aux_mem[InternalTensorIdx::TMP] = MemoryInfo(offset_int_vec(InternalTensorIdx::TMP), MemoryLifetime::Temporary, _tmp_info.total_size());
    _aux_mem[InternalTensorIdx::MAX] = MemoryInfo(offset_int_vec(InternalTensorIdx::MAX), MemoryLifetime::Temporary, _max_info.total_size());

    _aux_mem[InternalTensorIdx::PERMUTED_SRC] = MemoryInfo(offset_int_vec(InternalTensorIdx::PERMUTED_SRC), MemoryLifetime::Temporary, _permuted_src_info.total_size());
    _aux_mem[InternalTensorIdx::PERMUTED_DST] = MemoryInfo(offset_int_vec(InternalTensorIdx::PERMUTED_DST), MemoryLifetime::Temporary, _permuted_dst_info.total_size());
}

Status ClSoftmax::validate(const ITensorInfo &src, const ITensorInfo &dst, const SoftmaxKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(src.num_dimensions() > 4, "Only up to 4 dimensions are supported");
    ARM_COMPUTE_UNUSED(info.beta);
    ARM_COMPUTE_RETURN_ERROR_ON(info.axis < static_cast<int32_t>(-src.num_dimensions()) || static_cast<int32_t>(src.num_dimensions()) <= info.axis);

    const size_t actual_axis   = static_cast<size_t>(wrap_around(info.axis, static_cast<int32_t>(src.num_dimensions())));
    const bool   needs_permute = actual_axis != 0;
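    // When softmax is requested on a non-innermost axis, validate the forward and backward
    // permutes against the permuted shape before checking the kernels themselves.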
    if(needs_permute)
    {
        const PermutationVector permutation_vector = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
        const TensorShape       permuted_shape     = misc::shape_calculator::compute_permutation_output_shape(src, permutation_vector);
        TensorInfo              input_permuted(src.clone()->set_tensor_shape(permuted_shape));
        ARM_COMPUTE_RETURN_ON_ERROR(ClPermute::validate(&src, &input_permuted, permutation_vector));
        TensorInfo output_permuted(dst.clone()->set_tensor_shape(permuted_shape));
        ARM_COMPUTE_RETURN_ON_ERROR(ClPermute::validate(&output_permuted, &dst, permutation_vector));
    }

    // Create intermediate tensor info
    DataType   tmp_data_type = is_data_type_quantized_asymmetric(src.data_type()) ? DataType::S32 : src.data_type();
    TensorInfo tensor_info_tmp(src.clone()->set_data_type(tmp_data_type).set_is_resizable(true));

    TensorShape max_sum_shape = src.tensor_shape();
    max_sum_shape.set(0, 1);
    TensorInfo tensor_info_max(src.clone()->set_tensor_shape(max_sum_shape).set_is_resizable(true));
    TensorInfo tensor_info_sum(src.clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(QuantizationInfo()).set_is_resizable(true));

    ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClLogits1DMaxShiftExpSumKernel::validate(src, tensor_info_max, tensor_info_tmp, tensor_info_sum));
    ARM_COMPUTE_RETURN_ON_ERROR(kernels::ClLogits1DNormKernel::validate(tensor_info_tmp, tensor_info_sum, dst, info));

    return Status{};
}

void ClSoftmax::run(ITensorPack &tensors)
{
    auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
    auto dst = tensors.get_tensor(TensorType::ACL_DST);

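    // Bind the caller-provided workspace buffers (looked up by internal tensor index in
    // the pack) to the tensor infos captured at configure time.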
    CLAuxTensorHandler sum(offset_int_vec(InternalTensorIdx::SUM), _sum_info, tensors, false);
    CLAuxTensorHandler tmp(offset_int_vec(InternalTensorIdx::TMP), _tmp_info, tensors, false);
    CLAuxTensorHandler max(offset_int_vec(InternalTensorIdx::MAX), _max_info, tensors, false);

    CLAuxTensorHandler permuted_src(offset_int_vec(InternalTensorIdx::PERMUTED_SRC), _permuted_src_info, tensors, false);
    CLAuxTensorHandler permuted_dst(offset_int_vec(InternalTensorIdx::PERMUTED_DST), _permuted_dst_info, tensors, false);

    if(_needs_permute)
    {
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC, src);
        pack.add_tensor(TensorType::ACL_DST, permuted_src.get());
        _permute_input.get()->run(pack);
    }

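    // Build one pack per kernel: the first kernel reads the (possibly permuted) source and
    // writes the shifted exponentials plus the per-row max and sum; the second kernel
    // normalises those exponentials by the sums into the (possibly permuted) destination.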
    ITensorPack sum_pack;
    ITensorPack norm_pack;
    if(_needs_permute)
    {
        sum_pack.add_const_tensor(TensorType::ACL_SRC, permuted_src.get());
        norm_pack.add_tensor(TensorType::ACL_DST, permuted_dst.get());
    }
    else
    {
        sum_pack.add_const_tensor(TensorType::ACL_SRC, src);
        norm_pack.add_tensor(TensorType::ACL_DST, dst);
    }
    sum_pack.add_tensor(TensorType::ACL_DST, tmp.get());
    sum_pack.add_tensor(TensorType::ACL_INT_0, max.get());
    sum_pack.add_tensor(TensorType::ACL_INT_1, sum.get());

    norm_pack.add_const_tensor(TensorType::ACL_SRC, tmp.get());
    norm_pack.add_tensor(TensorType::ACL_INT_0, sum.get());

    CLScheduler::get().enqueue_op(*_max_shift_exp_sum_kernel.get(), sum_pack, false);
    CLScheduler::get().enqueue_op(*_norm_kernel.get(), norm_pack, false);

    if(_needs_permute)
    {
        ITensorPack pack;
        pack.add_const_tensor(TensorType::ACL_SRC, permuted_dst.get());
        pack.add_tensor(TensorType::ACL_DST, dst);
        _permute_output.get()->run(pack);
    }
}

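// Expose the auxiliary memory requirements recorded in configure() so the caller can
// allocate and supply the workspace tensors.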
experimental::MemoryRequirements ClSoftmax::workspace() const
{
    return _aux_mem;
}
} // namespace opencl
} // namespace arm_compute