/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/cpu/operators/CpuFullyConnected.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/misc/ShapeCalculator.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "src/common/utils/Log.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/MemoryHelpers.h"
#include "src/cpu/kernels/CpuTransposeKernel.h"
#include "src/cpu/operators/CpuConvertFullyConnectedWeights.h"
#include "src/cpu/operators/CpuFlatten.h"
#include "src/cpu/operators/CpuGemm.h"
#include "src/cpu/operators/CpuGemmLowpMatrixMultiplyCore.h"
#include "src/cpu/utils/CpuAuxTensorHandler.h"

namespace arm_compute
{
namespace cpu
{
using namespace arm_compute::experimental;
using namespace arm_compute::misc::shape_calculator;

namespace
{
// Get min, max bound of a quantized asymmetric dst tensor, with the effect of fused activation
std::pair<PixelValue, PixelValue> get_quantized_asymmetric_output_min_max(const QuantizationInfo &q_info, const ActivationLayerInfo &act_info, DataType data_type)
{
    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max)         = get_min_max(data_type);
    const UniformQuantizationInfo q_unif = q_info.uniform();

    if(act_info.enabled())
    {
        switch(act_info.activation())
        {
            case ActivationLayerInfo::ActivationFunction::RELU:
                type_min = PixelValue(q_unif.offset);
                break;
            case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                type_min = PixelValue(q_unif.offset);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                type_min = PixelValue(act_info.b(), data_type, q_info);
                type_max = PixelValue(act_info.a(), data_type, q_info);
                break;
            default:
                ARM_COMPUTE_ERROR("Activation function not supported.");
                break;
        }
    }

    return std::make_pair(type_min, type_max);
}
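// Example: for a QASYMM8 dst with zero-point 10 and a fused RELU, the bounds returned above are
// [10, 255]: real 0 maps onto the quantization offset and the upper bound stays at the data-type maximum.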

Status get_gemmlowp_output_stage_info(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *dst, const ActivationLayerInfo &act,
                                      GEMMLowpOutputStageInfo &gemmlowp_output_stage_info)
{
    const auto                    data_type = src->data_type();
    const QuantizationInfo        oq_info   = dst->quantization_info();
    const UniformQuantizationInfo iq_unif   = src->quantization_info().uniform();
    const UniformQuantizationInfo wq_unif   = weights->quantization_info().uniform();
    const UniformQuantizationInfo oq_unif   = oq_info.uniform();

    float   multiplier = (iq_unif.scale * wq_unif.scale) / oq_unif.scale;
    int32_t output_multiplier;
    int32_t output_shift;

    ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

    PixelValue type_min{};
    PixelValue type_max{};
    std::tie(type_min, type_max) = get_quantized_asymmetric_output_min_max(oq_info, act, data_type);

    gemmlowp_output_stage_info.gemmlowp_multiplier = output_multiplier;
    gemmlowp_output_stage_info.gemmlowp_shift      = output_shift;
    gemmlowp_output_stage_info.gemmlowp_offset     = oq_unif.offset;
    gemmlowp_output_stage_info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    gemmlowp_output_stage_info.gemmlowp_min_bound  = type_min.get<int32_t>();
    gemmlowp_output_stage_info.gemmlowp_max_bound  = type_max.get<int32_t>();

    return Status{};
}
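// Note: with QUANTIZE_DOWN_FIXEDPOINT the int32 accumulators are requantized roughly as
//   dst = clamp(((acc * gemmlowp_multiplier) >> gemmlowp_shift) + gemmlowp_offset, min_bound, max_bound)
// where gemmlowp_multiplier * 2^-gemmlowp_shift approximates the real multiplier
// (src_scale * weights_scale) / dst_scale computed above.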

Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ActivationLayerInfo &act, bool enable_fast_math, WeightFormat weight_format)
{
    if(is_data_type_quantized_asymmetric(src->data_type()))
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        ARM_COMPUTE_RETURN_ON_ERROR(get_gemmlowp_output_stage_info(src, weights, dst, act, gemmlowp_output_stage_info));

        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_fast_math(enable_fast_math);

        // Validate gemmlowp function
        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemmLowpMatrixMultiplyCore::validate(&src_info,
                                                                            &weights_info,
                                                                            biases,
                                                                            dst,
                                                                            gemm_info));
    }
    else
    {
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_weight_format(weight_format);
        gemm_info.set_fixed_format(weight_format != WeightFormat::UNSPECIFIED);
        gemm_info.set_fast_math(enable_fast_math);
        ARM_COMPUTE_RETURN_ON_ERROR(CpuGemm::validate(src, weights, biases, dst, 1.f, 1.0f, gemm_info));
    }

    return Status{};
}
} // namespace

CpuFullyConnected::CpuFullyConnected()
    : _flatten(nullptr),
      _convert_weights(nullptr),
      _transpose_weights(nullptr),
      _mm_gemm(nullptr),
      _mm_gemmlowp(nullptr),
      _flattened_src(),
      _converted_weights(),
      _reshaped_weights(),
      _trans_weights(),
      _trans_weights_idx(AuxTensorIdx::Count),
      _aux_mem(Count),
      _needs_weights_conversion(false),
      _needs_weights_reshape(false),
      _is_fc_after_conv(false),
      _is_quantized_asymmetric(false),
      _is_prepared(false),
      _enable_fast_math(false),
      _fixed_format(false),
      _weight_format(arm_compute::WeightFormat::UNSPECIFIED)
{
}

CpuFullyConnected::~CpuFullyConnected() = default;

void CpuFullyConnected::configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    if(_is_quantized_asymmetric)
    {
        // Since we need negative offsets for computing convolution, we need to change QuantizationInfo()
        // Extract and negate src and weights offset
        const QuantizationInfo src_quantization_info(src->quantization_info().uniform().scale, -src->quantization_info().uniform().offset);
        const QuantizationInfo weights_quantization_info(weights->quantization_info().uniform().scale, -weights->quantization_info().uniform().offset);

        TensorInfo src_info     = src->clone()->set_quantization_info(src_quantization_info);
        TensorInfo weights_info = weights->clone()->set_quantization_info(weights_quantization_info);

        // Configure gemmlowp function and output stage for asymmetric quantized types
        GEMMLowpOutputStageInfo gemmlowp_output_stage_info;
        const Status            status = get_gemmlowp_output_stage_info(&src_info, &weights_info, dst, act, gemmlowp_output_stage_info);
        ARM_COMPUTE_ERROR_ON(status.error_code() != ErrorCode::OK);

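        // The fused activation is already folded into the output-stage clamping bounds computed
        // above; it is also forwarded to the GEMM core below via set_activation_info().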
        GEMMInfo gemm_info;
        gemm_info.set_gemmlowp_output_stage(gemmlowp_output_stage_info);
        gemm_info.set_activation_info(act);
        gemm_info.set_fast_math(_enable_fast_math);
        _mm_gemmlowp = std::make_unique<CpuGemmLowpMatrixMultiplyCore>();
        _mm_gemmlowp->configure(&src_info, &weights_info, biases, dst, gemm_info);
    }
    else
    {
        // Configure matrix multiply kernel
        GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
        gemm_info.set_activation_info(act);
        gemm_info.set_fast_math(_enable_fast_math);
        gemm_info.set_fixed_format(_fixed_format);
        gemm_info.set_weight_format(_weight_format);
        _mm_gemm = std::make_unique<CpuGemm>();
        _mm_gemm->configure(src, weights, biases, dst, 1.f, 1.0f, gemm_info);
    }
}

void CpuFullyConnected::configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON((weights->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

    // If the fully connected layer is called after a convolution layer, the src tensor must be linearized

    // Initialize output tensor for flatten
    auto_init_if_empty(_flattened_src, src->clone()->set_tensor_shape(compute_flatten_shape(src)));

    _flatten = std::make_unique<CpuFlatten>();
    _flatten->configure(src, &_flattened_src);

    // Configure matrix multiply kernel
    configure_mm(&_flattened_src, weights, biases, dst, act);
}

void CpuFullyConnected::configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act)
{
    ARM_COMPUTE_ERROR_ON(src->dimension(0) != weights->dimension(1));

    // Configure matrix multiply kernel
    configure_mm(src, weights, biases, dst, act);
}

void CpuFullyConnected::configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
                                  FullyConnectedLayerInfo fc_info, const WeightsInfo &weights_info)
{
    // Perform validate step
    ARM_COMPUTE_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_ERROR_THROW_ON(CpuFullyConnected::validate(src,
                                                           weights,
                                                           biases != nullptr ? biases : nullptr,
                                                           dst,
                                                           fc_info,
                                                           weights_info));
    ARM_COMPUTE_LOG_PARAMS(src, weights, biases, dst, fc_info);

    _needs_weights_conversion = false;
    _needs_weights_reshape    = fc_info.transpose_weights ? !fc_info.are_weights_reshaped : false;
    _needs_weights_reshape    = _needs_weights_reshape && !fc_info.retain_internal_weights;
    _is_fc_after_conv         = true;
    _is_quantized_asymmetric  = is_data_type_quantized_asymmetric(src->data_type());
    _is_prepared              = false;
    _trans_weights_idx        = AuxTensorIdx::Count;
    _enable_fast_math         = fc_info.enable_fast_math;
    _fixed_format             = weights_info.weight_format() != WeightFormat::UNSPECIFIED;
    _weight_format            = weights_info.weight_format();

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;
    if(is_batched_fc_layer)
    {
        _is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3, src->tensor_shape().cend(), dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        _is_fc_after_conv = src->num_dimensions() > 1;
    }
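    // Heuristic: with a batched dst, the layer is treated as following a convolution only when the
    // src dimensions above the first three line up with the dst batch dimensions; without batches,
    // any multi-dimensional src (e.g. W x H x C) is assumed to be a convolution output that needs flattening.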

    // Reshape weights if needed
    if(_needs_weights_reshape)
    {
        // Reshape the weights
        _transpose_weights = std::make_unique<kernels::CpuTransposeKernel>();
        _transpose_weights->configure(weights, &_reshaped_weights);
        weights_to_use     = &_reshaped_weights;
        _trans_weights_idx = AuxTensorIdx::TransposedWeights;
    }

    // Convert weights if needed
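    // (The flattening order of the preceding feature map depends on its data layout, so when the
    // weights were trained with a different layout, e.g. NCHW vs NHWC, their rows have to be
    // re-ordered to match the runtime layout.)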
    if(_is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Convert weights
        _convert_weights = std::make_unique<CpuConvertFullyConnectedWeights>();
        _convert_weights->configure(weights_to_use,
                                    &_converted_weights,
                                    src->tensor_shape(),
                                    fc_info.weights_trained_layout);

        weights_to_use            = &_converted_weights;
        _needs_weights_conversion = true;
        _trans_weights_idx        = AuxTensorIdx::ConvertedWeights;
    }

    if(_is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        configure_conv_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        configure_fc_fc(src, weights_to_use, biases, dst, fc_info.activation_info);
    }

    // Retain the tensorinfo with the weights to use
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        _trans_weights = *weights_to_use;
    }

    // Set auxiliary memory requirements
    auto gemm_mem_req = (_is_quantized_asymmetric) ? _mm_gemmlowp->workspace() : _mm_gemm->workspace();
    for(unsigned int i = 0; i < gemm_mem_req.size(); ++i)
    {
        _aux_mem[i] = gemm_mem_req[i];
    }

    if(_aux_mem[Pretranspose].size > 0)
    {
        // Release permuted weights at the end of prepare as they are further transposed by the assembly dispatch
        // Do not release them if biases are dynamic and data type is quantized, since the weights tensor will be used for biases offset calculation
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), (_is_quantized_asymmetric && biases
                                                                                     && !(biases->are_values_constant())) ? MemoryLifetime::Persistent : MemoryLifetime::Prepare,
                                                 _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Prepare, _converted_weights.total_size());
    }
    else
    {
        _aux_mem[TransposedWeights] = MemoryInfo(offset_int_vec(TransposedWeights), _needs_weights_conversion ? MemoryLifetime::Prepare : MemoryLifetime::Persistent, _reshaped_weights.total_size());
        _aux_mem[ConvertedWeights]  = MemoryInfo(offset_int_vec(ConvertedWeights), MemoryLifetime::Persistent, _converted_weights.total_size());
    }
    _aux_mem[FlattenedSrc] = MemoryInfo(offset_int_vec(FlattenedSrc), MemoryLifetime::Temporary, _flattened_src.total_size());
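    // Lifetimes used above (as defined by experimental::MemoryLifetime): Temporary buffers are only
    // needed while run() executes, Prepare buffers can be released once prepare() has finished, and
    // Persistent buffers must stay allocated across runs.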
}

Status CpuFullyConnected::has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights,
                                       const ITensorInfo *biases, const ITensorInfo *dst, FullyConnectedLayerInfo fc_info, WeightsInfo weights_info)
{
    GEMMInfo gemm_info(false, false, true /* Reshape weights only for the first run */);
    gemm_info.set_activation_info(fc_info.activation_info);
    gemm_info.set_fast_math(fc_info.enable_fast_math);
    gemm_info.set_fixed_format(weights_info.weight_format() != WeightFormat::UNSPECIFIED);
    gemm_info.set_weight_format(weights_info.weight_format());

    return CpuGemm::has_opt_impl(expected_weight_format, src, weights, biases, dst, gemm_info);
}
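// Intended use (sketch): a caller queries has_opt_impl() with a WeightsInfo whose weight format is a
// query value such as WeightFormat::ANY and, if an optimized fixed-format kernel exists, reads back
// expected_weight_format so the weights can be laid out accordingly before calling configure().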

Status CpuFullyConnected::validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                                   FullyConnectedLayerInfo fc_info, const WeightsInfo &weights_info)
{
    ARM_COMPUTE_UNUSED(fc_info.retain_internal_weights);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, weights, dst);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);

    if (is_fixed_format_fast_math(weights_info.weight_format()))
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(src, DataType::F32);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(weights, DataType::BFLOAT16);
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(dst, DataType::F32);
    }
    else
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, weights, dst);
    }

    ARM_COMPUTE_RETURN_ERROR_ON(weights->num_dimensions() > 2);
    ARM_COMPUTE_RETURN_ERROR_ON(fc_info.activation_info.enabled() && is_data_type_quantized(src->data_type()) && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
                                && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU && fc_info.activation_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
    ARM_COMPUTE_RETURN_ERROR_ON(!weights->are_values_constant() && (!fc_info.are_weights_reshaped || fc_info.transpose_weights));

    bool weights_reshaped = fc_info.transpose_weights ? fc_info.are_weights_reshaped : true;
    bool is_fc_after_conv = true;

    const ITensorInfo &flatten_src       = TensorInfo(src->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_flatten_shape(src)));
    const ITensorInfo &reshaped_weights  = TensorInfo(weights->clone()->set_is_resizable(true).reset_padding().set_tensor_shape(compute_transposed_shape(*weights)));
    const ITensorInfo &converted_weights = weights_reshaped ? TensorInfo(weights->clone()->set_is_resizable(true).reset_padding()) : TensorInfo(*reshaped_weights.clone());

    // With the Fully Connected layer we can have 4 different cases:
    //  1) Convolution layer -> Fully Connected layer without batches
    //  2) Fully Connected layer -> Fully Connected layer without batches
    //  3) Convolution layer -> Fully Connected layer with batches
    //  4) Fully Connected layer -> Fully Connected layer with batches

    const ITensorInfo *src_to_use     = src;
    const ITensorInfo *weights_to_use = weights;

    // Check if we have a fully connected layer with batches
    const bool is_batched_fc_layer = dst->dimension(1) > 1;

    if(biases != nullptr)
    {
        ARM_COMPUTE_RETURN_ERROR_ON(biases->num_dimensions() > 1);
        if(is_data_type_quantized(src->data_type()))
        {
            ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(biases, 1, DataType::S32);
        }
        else
        {
            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, biases);
        }
    }

    if(is_batched_fc_layer)
    {
        is_fc_after_conv = (TensorShape::num_max_dimensions >= 4) && (std::equal(src->tensor_shape().cbegin() + 3, src->tensor_shape().cend(), dst->tensor_shape().cbegin() + 1));
    }
    else
    {
        is_fc_after_conv = src->num_dimensions() > 1;
    }

    if(!weights_reshaped)
    {
        // Validate reshape weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuTransposeKernel::validate(weights, &reshaped_weights));
        weights_to_use = &reshaped_weights;
    }

    if(is_fc_after_conv && (src->data_layout() != fc_info.weights_trained_layout))
    {
        // Validate convert weights kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CpuConvertFullyConnectedWeights::validate(weights_to_use,
                                                                              &converted_weights,
                                                                              src->tensor_shape(),
                                                                              fc_info.weights_trained_layout));
        weights_to_use = &converted_weights;
    }

    if(is_fc_after_conv)
    {
        // Fully Connected layer after a Convolution Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON((weights_to_use->dimension(1) != (src->dimension(0) * src->dimension(1) * src->dimension(2))));

        // Validate flatten kernel
        ARM_COMPUTE_RETURN_ON_ERROR(CpuFlatten::validate(src, &flatten_src));
        src_to_use = &flatten_src;
    }
    else
    {
        // Fully Connected layer after a Fully Connected Layer without batches
        ARM_COMPUTE_RETURN_ERROR_ON(src->dimension(0) != weights_to_use->dimension(1));
    }
    // Validate matrix multiply kernel
    ARM_COMPUTE_RETURN_ON_ERROR(validate_mm(src_to_use, weights_to_use, biases, dst, fc_info.activation_info, fc_info.enable_fast_math, weights_info.weight_format()));

    return Status{};
}

void CpuFullyConnected::run(ITensorPack &tensors)
{
    prepare(tensors);

    auto src = tensors.get_const_tensor(ACL_SRC_0);

    CpuAuxTensorHandler flattened_src(offset_int_vec(FlattenedSrc), _flattened_src, tensors, false);
    CpuAuxTensorHandler transformed_wei(offset_int_vec(_trans_weights_idx), _trans_weights, tensors, false);

    // Linearize src if it comes from a convolutional layer
    if(_is_fc_after_conv)
    {
        ITensorPack flatten_pack{ { ACL_SRC, src }, { ACL_DST, flattened_src.get() } };
        _flatten->run(flatten_pack);
    }

    ITensorPack gemm_pack = tensors;
    gemm_pack.add_const_tensor(ACL_SRC_0, (_is_fc_after_conv) ? flattened_src.get() : src);
    if(_needs_weights_reshape || _needs_weights_conversion)
    {
        gemm_pack.add_const_tensor(ACL_SRC_1, transformed_wei.get());
    }

    // Run matrix multiply
    if(_is_quantized_asymmetric)
    {
        _mm_gemmlowp->run(gemm_pack);
    }
    else
    {
        _mm_gemm->run(gemm_pack);
    }
}
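// Rough usage sketch: configure() is called once with the tensor infos, workspace() reports the
// auxiliary buffers the caller must provide, and each invocation then builds an ITensorPack with
// ACL_SRC_0 (src), ACL_SRC_1 (weights), the biases and ACL_DST (dst) plus those auxiliary tensors
// and calls run(); the one-off weight transformations in prepare() are triggered on the first call.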

void CpuFullyConnected::prepare(ITensorPack &tensors)
{
    if(!_is_prepared)
    {
        auto weights = tensors.get_const_tensor(ACL_SRC_1);

        CpuAuxTensorHandler reshaped_weights(offset_int_vec(TransposedWeights), _reshaped_weights, tensors, false);
        CpuAuxTensorHandler converted_weights(offset_int_vec(ConvertedWeights), _converted_weights, tensors, false);

        // Pointer to current weights
        const ITensor *cur_weights = weights;

        // Reshape of the weights (happens only once)
        if(_needs_weights_reshape)
        {
            // Run reshape weights kernel and mark weights as unused
            ITensorPack transpose_pack{ { ACL_SRC, weights }, { ACL_DST, reshaped_weights.get() } };
            NEScheduler::get().schedule_op(_transpose_weights.get(), Window::DimY, _transpose_weights->window(), transpose_pack);

            cur_weights->mark_as_unused();
            cur_weights = reshaped_weights.get();
        }

        // Convert weights if needed (happens only once)
        if(_needs_weights_conversion)
        {
            ITensorPack convert_pack{ { ACL_SRC, cur_weights }, { ACL_DST, converted_weights.get() } };
            _convert_weights->run(convert_pack);

            cur_weights->mark_as_unused();
            cur_weights = converted_weights.get();
        }

        ITensorPack gemm_pack = tensors;
        gemm_pack.add_const_tensor(ACL_SRC_1, cur_weights);

        // Prepare the GEMM function and release unused weights
        if(!_is_quantized_asymmetric)
        {
            _mm_gemm->prepare(gemm_pack);
        }
        else
        {
            _mm_gemmlowp->prepare(gemm_pack);
        }

        _is_prepared = true;
    }
}

experimental::MemoryRequirements CpuFullyConnected::workspace() const
{
    return _aux_mem;
}
} // namespace cpu
} // namespace arm_compute