/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CL/functions/CLGEMMDeconvolutionLayer.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLDeconvolutionReshapeOutputKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"

#include "src/common/utils/Log.h"

#include <tuple>

namespace arm_compute
{
namespace
{
std::pair<Coordinates, Coordinates> compute_start_end_slice_coordinates(const ITensorInfo &output_info, const PadStrideInfo &deconv_info, bool is_nchw)
{
    Coordinates start;
    Coordinates end;

    if(is_nchw)
    {
        start.set(0, deconv_info.pad_left());
        start.set(1, deconv_info.pad_top());
        end.set(0, output_info.dimension(0) - deconv_info.pad_right());
        end.set(1, output_info.dimension(1) - deconv_info.pad_bottom());
    }
    else
    {
        start.set(0, 0);
        start.set(1, deconv_info.pad_left());
        start.set(2, deconv_info.pad_top());

        end.set(0, output_info.dimension(0));
        end.set(1, output_info.dimension(1) - deconv_info.pad_right());
        end.set(2, output_info.dimension(2) - deconv_info.pad_bottom());
    }

    return { start, end };
}
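// Illustrative example (hypothetical values): for an NCHW output of 10x10 with a
// padding of 1 on every side, the slice keeps the region [1, 9) x [1, 9), i.e. the
// 8x8 interior; in NHWC the channel dimension (dim 0) is always kept whole.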
Status construct_gemmlowp_output_stage(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, GEMMLowpOutputStageInfo &output_stage_info)
{
    const auto data_type = input->data_type();

    if(is_data_type_quantized_asymmetric(data_type))
    {
        const UniformQuantizationInfo iq_info = input->quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights->quantization_info().uniform();
        const UniformQuantizationInfo oq_info = output->quantization_info().uniform();

        float multiplier = iq_info.scale * wq_info.scale / oq_info.scale;
        int   output_multiplier(0);
        int   output_shift(0);
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));
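        // The float multiplier is decomposed into a fixed-point multiplier and a right
        // shift, i.e. multiplier ~= (output_multiplier / 2^31) * 2^-output_shift.
        // Worked example (hypothetical scales): multiplier = 0.05 = 0.8 * 2^-4, so
        // output_multiplier ~= round(0.8 * 2^31) = 1717986918 and output_shift = 4.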

        output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
        output_stage_info.gemmlowp_multiplier = output_multiplier;
        output_stage_info.gemmlowp_shift      = output_shift;
        output_stage_info.gemmlowp_offset     = oq_info.offset;
        const auto min_max_bound              = get_min_max(data_type);
        output_stage_info.gemmlowp_min_bound  = (std::get<0>(min_max_bound)).get<int32_t>();
        output_stage_info.gemmlowp_max_bound  = (std::get<1>(min_max_bound)).get<int32_t>();
        output_stage_info.output_data_type    = data_type;
    }
    return Status{};
}

} // namespace

CLGEMMDeconvolutionLayer::CLGEMMDeconvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager) // NOLINT
    : _memory_group(std::move(memory_manager)),
      _mm_gemm(),
      _mm_gemmlowp(),
      _gemmlowp_output_stage(),
      _permute_input_to_nhwc(),
      _permute_weights_to_nhwc(),
      _reshape_weights(),
      _transpose_weights(),
      _deconv_reshape(std::make_unique<CLDeconvolutionReshapeOutputKernel>()),
      _slice_gemm(),
      _gemmlowp_final(),
      _reshaped_weights(),
      _reshaped_weights_t(),
      _permuted_input(),
      _permuted_weights(),
      _gemm_output(),
      _slice_gemm_input(),
      _original_weights(),
      _is_prepared(false),
      _padded_input(false),
      _is_nchw(false),
      _is_quantized(false)
{
}

CLGEMMDeconvolutionLayer::~CLGEMMDeconvolutionLayer() = default;

Status CLGEMMDeconvolutionLayer::validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *bias, const ITensorInfo *output, const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F32, DataType::F16, DataType::QASYMM8, DataType::QASYMM8_SIGNED);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, weights);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, weights);

    DataLayout data_layout  = input->data_layout();
    const bool padded_input = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    const bool is_nchw      = input->data_layout() == DataLayout::NCHW;
    const bool is_quantized = is_data_type_quantized_asymmetric(input->data_type());

    const size_t idx_w = get_data_layout_dimension_index(data_layout, DataLayoutDimension::WIDTH);
    const size_t idx_h = get_data_layout_dimension_index(data_layout, DataLayoutDimension::HEIGHT);
    const size_t idx_b = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);

    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_w) != deconv_info.stride().first);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->dimension(idx_h) != deconv_info.stride().second);

    TensorShape nhwc_weights_shape = weights->tensor_shape();
    TensorShape nhwc_input_shape   = input->tensor_shape();

    if(is_nchw)
    {
        permute(nhwc_weights_shape, PermutationVector(2, 0, 1));
        permute(nhwc_input_shape, PermutationVector(2, 0, 1));

        TensorInfo nhwc_input_info = input->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_input_shape).set_data_layout(DataLayout::NCHW);

        TensorInfo nhwc_weights_info = weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(nhwc_weights_shape).set_data_layout(DataLayout::NCHW);

        CLPermute::validate(weights, &nhwc_weights_info, PermutationVector(2, 0, 1));
        CLPermute::validate(input, &nhwc_input_info, PermutationVector(2, 0, 1));
    }

    const TensorShape reshaped_shape = TensorShape(nhwc_weights_shape[0], nhwc_weights_shape[1] * nhwc_weights_shape[2] * nhwc_weights_shape[3]);
    const TensorInfo  reshaped_info  = weights->clone()->set_tensor_shape(reshaped_shape).set_data_layout(DataLayout::NCHW).set_is_resizable(true);
    ARM_COMPUTE_RETURN_ON_ERROR(CLReshapeLayer::validate(weights, &reshaped_info));

    TensorShape      transposed_shape(reshaped_shape[1], reshaped_shape[0]);
    const TensorInfo reshaped_t_info = reshaped_info.clone()->set_is_resizable(true).set_tensor_shape(transposed_shape);
    ARM_COMPUTE_RETURN_ON_ERROR(CLTranspose::validate(&reshaped_info, &reshaped_t_info));
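    // Illustrative shapes (hypothetical values): NHWC weights of shape [8, 2, 2, 16]
    // are flattened into an [8, 64] matrix and transposed to [64, 8], turning the
    // deconvolution into a plain GEMM over the flattened input pixels.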

    TensorShape gemm_output_shape(weights->dimension(idx_w) * weights->dimension(idx_h) * weights->dimension(idx_b),
                                  input->dimension(idx_w),
                                  input->dimension(idx_h),
                                  input->dimension(idx_b));

    TensorInfo gemm_output_info = reshaped_t_info.clone()->set_tensor_shape(gemm_output_shape).set_is_resizable(true);
    GEMMInfo   gemm_info(false, false, true, input->dimension(idx_h), true);

    GEMMLowpOutputStageInfo output_stage_info;

    if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpMatrixMultiplyCore::validate(&input->clone()->set_tensor_shape(nhwc_input_shape), &reshaped_t_info, nullptr, &gemm_output_info.set_data_type(DataType::S32),
                                                                           gemm_info));
        ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(input, weights, output, output_stage_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMM::validate(&input->clone()->set_tensor_shape(nhwc_input_shape).set_is_resizable(true), &reshaped_t_info, nullptr, &gemm_output_info, 1.0f, 0.0f, gemm_info));
    }

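    // Note (standard transposed-convolution arithmetic): with the zero-padding
    // stride_info below, the upsampled spatial size is expected to come out as
    // out = (in - 1) * stride + kernel per dimension; e.g. in = 4 and
    // stride = kernel = 2 gives out = 8.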
    const PadStrideInfo stride_info(deconv_info.stride().first, deconv_info.stride().second);
    auto                out_dims           = deconvolution_output_dimensions(input->dimension(idx_w), input->dimension(idx_h), weights->dimension(idx_w), weights->dimension(idx_h), stride_info);
    const TensorShape   deconv_shape       = misc::shape_calculator::compute_deconvolution_output_shape(out_dims, *input, *weights);
    TensorInfo          col2im_output_info = gemm_output_info.clone()->set_tensor_shape(deconv_shape).set_is_resizable(true);

    if(padded_input && is_quantized)
    {
        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, &col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output_stage_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info.clone()->set_is_resizable(true).set_data_type(input->data_type()), output, start_end.first, start_end.second));
    }
    else if(padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(col2im_output_info, deconv_info, is_nchw);
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLSlice::validate(&col2im_output_info, output, start_end.first, start_end.second));
    }
    else if(is_quantized)
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, &col2im_output_info, input, weights, deconv_info));
        ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&col2im_output_info, nullptr, output, output_stage_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(CLDeconvolutionReshapeOutputKernel::validate(&gemm_output_info, bias, output, input, weights, deconv_info));
    }

    return Status{};
}

void CLGEMMDeconvolutionLayer::configure(const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output, const PadStrideInfo &deconv_info)
{
    configure(CLKernelLibrary::get().get_compile_context(), input, weights, bias, output, deconv_info);
}

void CLGEMMDeconvolutionLayer::configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *weights, const ICLTensor *bias, ICLTensor *output,
                                         const PadStrideInfo &deconv_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weights, output);
    ARM_COMPUTE_ERROR_THROW_ON(CLGEMMDeconvolutionLayer::validate(input->info(),
                                                                  weights->info(),
                                                                  bias != nullptr ? bias->info() : nullptr,
                                                                  output->info(),
                                                                  deconv_info));
    ARM_COMPUTE_LOG_PARAMS(input, weights, bias, output, deconv_info);

    _original_weights = weights;
    _padded_input     = deconv_info.pad_bottom() > 0 || deconv_info.pad_left() > 0 || deconv_info.pad_right() > 0 || deconv_info.pad_top() > 0;
    _is_nchw          = input->info()->data_layout() == DataLayout::NCHW;
    _is_quantized     = is_data_type_quantized_asymmetric(input->info()->data_type());

    const ICLTensor *input_to_use   = input;
    const ICLTensor *weights_to_use = weights;

    // If the data layout is NCHW, transform everything to NHWC. An alternative would be to
    // do an outer product in NCHW and then accumulate through a reduction. That would have two
    // drawbacks: first, the outer product is less efficient than a full GEMM; second, the
    // reduction might be slower than the GEMM itself.
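    // Note: PermutationVector(2U, 0U, 1U) is the NCHW -> NHWC conversion used across
    // the library, e.g. an NCHW shape [W, H, C, N] becomes [C, W, H, N].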
    if(_is_nchw)
    {
        _memory_group.manage(&_permuted_input);
        _permute_input_to_nhwc.configure(compile_context, input, &_permuted_input, PermutationVector(2U, 0U, 1U));

        _permute_weights_to_nhwc.configure(compile_context, weights, &_permuted_weights, PermutationVector(2U, 0U, 1U));

        input_to_use   = &_permuted_input;
        weights_to_use = &_permuted_weights;
    }

    // Reshape the input weights. The weights will be reshaped only once during the call to prepare()
    _reshaped_weights.allocator()->init(TensorInfo(TensorShape(weights_to_use->info()->dimension(0),
                                                               weights_to_use->info()->dimension(1) * weights_to_use->info()->dimension(2) * weights_to_use->info()->dimension(3)),
                                                   1,
                                                   input->info()->data_type(), weights->info()->quantization_info()));

    _reshape_weights.configure(compile_context, weights_to_use, &_reshaped_weights);
    _transpose_weights.configure(compile_context, &_reshaped_weights, &_reshaped_weights_t);

    const size_t idx_h = get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::HEIGHT);
    GEMMInfo     gemm_info(false, false, true, input->info()->dimension(idx_h), true);
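    // Note on gemm_info (based on the GEMMInfo constructor order in this version of
    // the library, an assumption worth checking): B is reshaped only on the first run
    // since the weights are constant, depth_output_gemm3d is set to the input height,
    // and reinterpret_input_as_3d is true so the GEMM preserves the W/H pixel split.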

    // Configure output stage for asymmetric quantized types
    if(_is_quantized)
    {
        // gemmlowp adds the offsets (instead of subtracting them), so we negate the
        // original offsets here and restore them afterwards to get the correct result.
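        // A quantized value q represents the real value scale * (q - offset); a core
        // that adds the offset would effectively use (q + offset), so passing -offset
        // restores the intended (q - offset) term.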
        QuantizationInfo iq_info = input->info()->quantization_info();
        QuantizationInfo wq_info = weights->info()->quantization_info();

        input_to_use->info()->set_quantization_info(QuantizationInfo(iq_info.uniform().scale, -iq_info.uniform().offset));
        _reshaped_weights_t.info()->set_quantization_info(QuantizationInfo(wq_info.uniform().scale, -wq_info.uniform().offset));

        _mm_gemmlowp.configure(compile_context, input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, gemm_info);

        input_to_use->info()->set_quantization_info(iq_info);
        _reshaped_weights_t.info()->set_quantization_info(wq_info);
    }
    else
    {
        _mm_gemm.configure(compile_context, input_to_use, &_reshaped_weights_t, nullptr, &_gemm_output, 1.f, 0.0f, gemm_info);
    }

    if(_is_nchw)
    {
        _permuted_input.allocator()->allocate();
    }

    ICLTensor *deconv_reshape_output = nullptr;
    ICLTensor *slice_output          = nullptr;
    ICLTensor *output_stage_output   = nullptr;

    if(_padded_input && _is_quantized)
    {
        _memory_group.manage(&_slice_gemm_input);
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_padded_input)
    {
        _memory_group.manage(&_slice_gemm_input);
        deconv_reshape_output = &_slice_gemm_input;
        slice_output          = output;
    }
    else if(_is_quantized)
    {
        _memory_group.manage(&_gemmlowp_final);
        deconv_reshape_output = &_gemmlowp_final;
        output_stage_output   = output;
    }
    else
    {
        deconv_reshape_output = output;
    }

    // Configure a Col2Im call to reshape the output of GEMM
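    // The GEMM produces, for every input pixel, the full set of kernel-sized
    // contributions; this kernel scatters and accumulates them (plus the bias) into
    // the upsampled output, much like the col2im pass of a conventional convolution.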
    _deconv_reshape->configure(compile_context, &_gemm_output, bias, deconv_reshape_output, input->info(), weights->info(), deconv_info);
    _gemm_output.allocator()->allocate();

    if(_is_quantized)
    {
        GEMMLowpOutputStageInfo output_stage_info;
        construct_gemmlowp_output_stage(input->info(), weights->info(), output->info(), output_stage_info);
        _gemmlowp_output_stage.configure(compile_context, &_gemmlowp_final, nullptr, output_stage_output, output_stage_info);
        _gemmlowp_final.allocator()->allocate();
    }

    // If the input was padded, the output needs to be sliced.
    if(_padded_input)
    {
        const auto start_end = compute_start_end_slice_coordinates(*deconv_reshape_output->info(), deconv_info, _is_nchw);
        _slice_gemm.configure(compile_context, &_slice_gemm_input, slice_output, start_end.first, start_end.second);
        _slice_gemm_input.allocator()->allocate();
    }
}

void CLGEMMDeconvolutionLayer::run()
{
    prepare();

    MemoryGroupResourceScope scope_mg(_memory_group);

    if(_is_nchw)
    {
        _permute_input_to_nhwc.run();
    }

    if(_is_quantized)
    {
        _mm_gemmlowp.run();
    }
    else
    {
        _mm_gemm.run();
    }

    CLScheduler::get().enqueue(*_deconv_reshape, false);

    if(_is_quantized)
    {
        _gemmlowp_output_stage.run();
    }

    if(_padded_input)
    {
        _slice_gemm.run();
    }
}

void CLGEMMDeconvolutionLayer::prepare()
{
    if(!_is_prepared)
    {
        ARM_COMPUTE_ERROR_ON(!_original_weights->is_used());

        if(_is_nchw)
        {
            _permuted_weights.allocator()->allocate();
            _permute_weights_to_nhwc.run();
        }

        _reshaped_weights.allocator()->allocate();
        _reshape_weights.run();

        if(_is_nchw)
        {
            _permuted_weights.allocator()->free();
        }

        _reshaped_weights_t.allocator()->allocate();
        _transpose_weights.run();

        // Prepare gemm
        if(!_is_quantized)
        {
            _mm_gemm.prepare();
        }
        else
        {
            _mm_gemmlowp.prepare();
        }

        // Free resources
        if(!_reshaped_weights_t.is_used())
        {
            _reshaped_weights_t.allocator()->free();
        }

        _original_weights->mark_as_unused();
        _is_prepared = true;
    }
}
} // namespace arm_compute