/*
 * Copyright (c) 2017-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/gpu/cl/operators/ClFullyConnected.h"

#include "arm_compute/core/Size2D.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"

#include "src/core/helpers/MemoryHelpers.h"
#include "src/gpu/cl/operators/ClConvertFullyConnectedWeights.h"
#include "src/gpu/cl/operators/ClFlatten.h"
#include "src/gpu/cl/operators/ClGemm.h"
#include "src/gpu/cl/operators/ClGemmLowpMatrixMultiplyCore.h"
#include "src/gpu/cl/operators/ClTranspose.h"
#include "src/gpu/cl/utils/ClAuxTensorHandler.h"

#include "src/common/utils/Log.h"
#include "support/Cast.h"

#include <algorithm>

namespace arm_compute
{
namespace opencl
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
Status construct_gemmlowp_output_stage(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo &dst,
                                       GEMMLowpOutputStageInfo &gemmlowp_output_stage, ActivationLayerInfo activation_info)
{
    gemmlowp_output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage.gemmlowp_offset     = 0;
    gemmlowp_output_stage.gemmlowp_multiplier = 0;
    gemmlowp_output_stage.gemmlowp_shift      = 0;

    const auto data_type = src.data_type();

    // Configure output stage for quantized case
    if(is_data_type_quantized_asymmetric(data_type))
    {
        const QuantizationInfo        oq_info = dst.quantization_info();
        const UniformQuantizationInfo iq_unif = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_unif = weights.quantization_info().uniform();
        const UniformQuantizationInfo oq_unif = oq_info.uniform();

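        // If the destination has not been initialized yet (total size is zero), fall back to the source quantization info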
        const auto output_quant_info = (dst.total_size() == 0) ? iq_unif : oq_unif;

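        // Decompose the effective requantization scale (src_scale * weights_scale / dst_scale) into a fixed-point multiplier and shift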
        const float multiplier        = (iq_unif.scale * wq_unif.scale) / output_quant_info.scale;
        int         output_multiplier = 0;
        int         output_shift      = 0;
        ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

        PixelValue type_min{};
        PixelValue type_max{};
        std::tie(type_min, type_max) = get_min_max(data_type);

        if(activation_info.enabled())
        {
            std::tie(type_min, type_max) = get_quantized_activation_min_max(activation_info, data_type, output_quant_info);
        }

        // Set the GEMMLowp output stage info
        gemmlowp_output_stage.gemmlowp_offset     = output_quant_info.offset;
        gemmlowp_output_stage.gemmlowp_multiplier = output_multiplier;
        gemmlowp_output_stage.gemmlowp_shift      = output_shift;
        gemmlowp_output_stage.gemmlowp_multipliers.push_back(output_multiplier);
        gemmlowp_output_stage.gemmlowp_shifts.push_back(output_shift);
        type_min.get(gemmlowp_output_stage.gemmlowp_min_bound);
        type_max.get(gemmlowp_output_stage.gemmlowp_max_bound);
    }

    return Status{};
}

Status validate_mm(const ITensorInfo &src, const ITensorInfo &weights, const ITensorInfo *bias, const ITensorInfo &dst, const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    ARM_COMPUTE_RETURN_ON_ERROR(construct_gemmlowp_output_stage(src, weights, dst, gemmlowp_output_stage, fc_info.activation_info));

    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         ActivationLayerInfo());          // activation_info

    if(is_data_type_quantized_asymmetric(src.data_type()))
    {
        const UniformQuantizationInfo iq_info = src.quantization_info().uniform();
        const UniformQuantizationInfo wq_info = weights.quantization_info().uniform();

        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(iq_info.scale, -iq_info.offset);
        const QuantizationInfo weights_quantization_info(wq_info.scale, -wq_info.offset);

        // Validate gemmlowp function
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemmLowpMatrixMultiplyCore::validate(&src.clone()->set_quantization_info(src_quantization_info),
                                                                           &weights.clone()->set_quantization_info(weights_quantization_info),
                                                                           bias,
                                                                           &dst,
                                                                           gemm_info));
    }
    else
    {
        ARM_COMPUTE_RETURN_ON_ERROR(ClGemm::validate(&src, &weights, bias, &dst, 1.f, 1.f, gemm_info));
    }

    return Status{};
}
} // namespace

ClFullyConnected::ClFullyConnected()
    : _convert_weights(nullptr),
      _flatten(nullptr),
      _reshape_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _aux_mem(Count)
{
}

ClFullyConnected::~ClFullyConnected() = default;

void ClFullyConnected::configure_mm(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                    const FullyConnectedLayerInfo &fc_info)
{
    GEMMLowpOutputStageInfo gemmlowp_output_stage;
    construct_gemmlowp_output_stage(*src, *weights, *dst, gemmlowp_output_stage, fc_info.activation_info);

    const GEMMInfo &gemm_info = GEMMInfo(false,                           // is_a_reshaped
                                         false,                           // is_b_reshaped
                                         true,                            // reshape_b_only_on_first_run
                                         0,                               // depth_output_gemm3d
                                         false,                           // reinterpret_input_as_3d
                                         fc_info.retain_internal_weights, // retain_internal_weights
                                         gemmlowp_output_stage,           // gemmlowp_output_stage
                                         fc_info.fp_mixed_precision,      // fp_mixed_precision
                                         false,                           // fast_math
                                         true,                            // broadcast_bias
                                         fc_info.activation_info);        // activation_info

    if(_is_quantized)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate input and weights offset
        const QuantizationInfo src_quantization_info     = src->quantization_info();
        const QuantizationInfo weights_quantization_info = weights->quantization_info();

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        src_info.set_quantization_info(QuantizationInfo(src_quantization_info.uniform().scale, -src_quantization_info.uniform().offset));
        weights_info.set_quantization_info(QuantizationInfo(weights_quantization_info.uniform().scale, -weights_quantization_info.uniform().offset));

        // Configure gemmlowp function
        _mm_gemmlowp = std::make_unique<ClGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(compile_context, &src_info, &weights_info, bias, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        _mm_gemm = std::make_unique<ClGemm>();
        _mm_gemm->configure(compile_context, src, weights, bias, dst, 1.f, 1.f, gemm_info);
    }
}

void ClFullyConnected::configure_conv_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                         const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the input tensor must be linearized

    // Initialize output tensor for flatten
    _flattened_src = src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW);

    // Configure flatten kernel
    _flatten = std::make_unique<ClFlatten>();
    _flatten->configure(compile_context, src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(compile_context, &_flattened_src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure_fc_fc(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *bias, ITensorInfo *dst,
                                       const FullyConnectedLayerInfo &fc_info)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(compile_context, src, weights, bias, dst, fc_info);
}

void ClFullyConnected::configure(const CLCompileContext &compile_context, ITensorInfo *src, ITensorInfo *weights, ITensorInfo *biases, ITensorInfo *dst,
                                 FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);

    // Perform validate step
    ARM_COMPUTE_ERROR_THROW_ON(ClFullyConnected::validate(src, weights, biases, dst, fc_info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info);

    _are_weights_converted = true;
    _are_weights_reshaped  = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    _is_fc_after_conv      = true;
    _is_quantized          = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared           = fc_info.retain_internal_weights;
    _weights_to_use        = TensorInfo(*weights);
    _weights_to_use_idx    = ACL_SRC_1;

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
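        // In the batched case, the input is assumed to come from a convolution if its dimensions from index 3 onwards
        // match the batch dimensions of the output (starting at index 1)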
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                  src->tensor_shape().cend(),
                                                                                  dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }

    ITensorInfo *weights_used = weights;

    // Reshape weights if needed
    if(!_are_weights_reshaped)
    {
        // Reshape the weights
        _reshape_weights = std::make_unique<ClTranspose>();
        _reshape_weights->configure(compile_context, weights, &_reshaped_weights);
        weights_used        = &_reshaped_weights;
        _weights_to_use_idx = offset_int_vec(TransposedWeights);
    }

    // Convert weights if needed
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<ClConvertFullyConnectedWeights>();
        _convert_weights->configure(compile_context,
                                    weights_used,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_used           = &_converted_weights;
        _weights_to_use_idx    = offset_int_vec(ConvertedWeights);
        _are_weights_converted = false;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(compile_context, src, weights_used, biases, dst, fc_info);
    }
    // Update TensorInfo of final weights used (Need to be done in the end due to padding expansion)
    _weights_to_use = *weights_used;

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }
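    // If the GEMM keeps its own persistent copy of the weights, the transposed/converted weights are only needed
    // during prepare(), so their auxiliary buffers can be released afterwards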
    if(_aux_mem[1].size > 0 || _aux_mem[2].size > 0) // Persistent weights memory on GEMMs
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), MemoryLifetime::Prepare, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
    }
    else
    {
        // Keep the weights tensor that is actually fed to the GEMM persistent; intermediate weights are only needed during prepare
        const auto transposed_wei_lft = (_weights_to_use_idx == offset_int_vec(TransposedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;
        const auto converted_wei_lft  = (_weights_to_use_idx == offset_int_vec(ConvertedWeights)) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare;

        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), transposed_wei_lft, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), converted_wei_lft, _converted_weights.total_size());
    }
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
}

Status ClFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
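    // Dynamic (non-constant) weights are only supported when they are provided already reshaped and no transpose is requested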
    ARM_COMPUTE_RETURN_ERROR_ON(!weights->are_values_constant() && (!fc_info.are_weights_reshaped || fc_info.transpose_weights));

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

    const ITensorInfo &flatten_src       = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)).set_data_layout(DataLayout::NCHW));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
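    // The converted weights mirror whichever tensor actually feeds the convert step: the original weights if they are
    // already reshaped, otherwise the transposed weights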
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *src_to_use     = src;
    const ITensorInfo *weights_to_use = weights;

    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
        if(is_data_type_quantized(src->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
    }

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3,
                                                                                 src->tensor_shape().cend(),
                                                                                 dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = src->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClTranspose::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClConvertFullyConnectedWeights::validate(weights_to_use,
                                                                             &converted_weights,
                                                                             src->tensor_shape(),
                                                                             fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(ClFlatten::validate(src, &flatten_src));
        src_to_use = &flatten_src;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
    }

    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(*src_to_use, *weights_to_use, biases, *dst, fc_info));

    return Status{};
}

void ClFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src = tensors.get_const_tensor(ACL_SRC_0);

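    // Bind the auxiliary flattened-input and weights buffers provided through the workspace entries of the tensor pack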
    CLAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CLAuxTensorHandler weights(_weights_to_use_idx, _weights_to_use, tensors, false);

    // Linearize input if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
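    // If the weights were reshaped and/or converted, feed the transformed auxiliary weights to the GEMM instead of the original ACL_SRC_1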
    if(_weights_to_use_idx != ACL_SRC_1)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, weights.get());
    }

    // Run matrix multiply
    if(_is_quantized)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}

void ClFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        auto weights = tensors.get_const_tensor(ACL_SRC_1);

        CLAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CLAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape of the weights if needed (happens only once)
        if(!_are_weights_reshaped)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            _reshape_weights->run(transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();

            _are_weights_reshaped = true;
        }

        // Convert weights if needed (happens only once)
        if(!_are_weights_converted)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();

            _are_weights_converted = true;
        }

        tensors.add_const_tensor(ACL_SRC_1, cur_weights);

        // Prepare the GEMM function and release unused weights
        if(!_is_quantized)
        {
            _mm_gemm->prepare(tensors);
        }
        else
        {
            _mm_gemmlowp->prepare(tensors);
        }
        _is_prepared = true;
    }
}

experimental::MemoryRequirements ClFullyConnected::workspace() const
{
    return _aux_mem;
}
} // namespace opencl
} // namespace arm_compute