/*
 * Copyright (c) 2021-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_FULLY_CONNECTED_H
#define ARM_COMPUTE_CPU_FULLY_CONNECTED_H

#include "src/cpu/ICpuOperator.h"

#include "arm_compute/core/TensorInfo.h"

#include <memory>

namespace arm_compute
{
namespace cpu
{
// Forward declarations
class CpuConvertFullyConnectedWeights;
class CpuFlatten;
class CpuGemm;
class CpuGemmLowpMatrixMultiplyCore;
namespace kernels
{
class CpuTransposeKernel;
} // namespace kernels
/** Basic function to compute a Fully Connected layer. This function calls the following kernels:
 * -# @ref kernels::CpuIm2ColKernel (called when the input comes from a convolutional layer)
 * -# @ref kernels::CpuTransposeKernel (if @p are_weights_reshaped is set to false and transpose_weights is set to true ) (called once)
 * -# @ref CpuGemm or @ref CpuGemmLowpMatrixMultiplyCore (if quantized asymmetric)
 * -# @ref kernels::CpuGemmMatrixAdditionKernel or @ref CpuGemmLowpOutputStage (if quantized asymmetric) (if @p biases is not equal to nullptr)
 *
 * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
 */
class CpuFullyConnected : public ICpuOperator
{
public:
    /** Constructor */
    CpuFullyConnected();
    /** Destructor */
    ~CpuFullyConnected();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1               |src2   |dst            |
     * |:--------------|:------------------|:------|:--------------|
     * |F16            |F16                |F16    |F16            |
     * |F32            |F32                |F32    |F32            |
     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
     *
     * @param[in]  src          Source tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights      Weights tensor info. The weights must be 2 dimensional.
     *                          If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
     *                          If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
     *                          Data type supported: Same as @p src.
     * @param[in]  biases       Bias tensor info. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED.
     * @param[out] dst          Destination tensor info. Its shape should be equal to the output of a matrix multiplication between:
     *                          - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
     *                          - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
     *                          Data type supported: Same as @p src.
     * @param[in]  fc_info      (Optional) Fully connected layer additional info
     * @param[in]  weights_info (Optional) Stores necessary compute information when weights are already reshaped
     */
    void configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst,
                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(), const WeightsInfo &weights_info = WeightsInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref CpuFullyConnected
     *
     * Similar to @ref CpuFullyConnected::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                           FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(), const WeightsInfo &weights_info = WeightsInfo());

    /** Static function that queries whether there exists fixed-format kernel and if it exists it will return in the first argument in what format
     * weights are expected to be reshaped as defined by WeightFormat class. Apart from the first argument the rest of the arguments are the same
     * as in @ref CpuFullyConnected::validate() except that all arguments are required.
     *
     * @return a status
     */
    static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights,
                               const ITensorInfo *biases, const ITensorInfo *dst,
                               FullyConnectedLayerInfo fc_info, WeightsInfo weights_info);

    // Inherited methods overridden:
    void run(ITensorPack &tensors) override;
    void prepare(ITensorPack &tensors) override;
    experimental::MemoryRequirements workspace() const override;

private:
    // Configuration helper for the case where this layer follows another fully connected layer (2D input).
    void configure_fc_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);
    // Configuration helper for the case where this layer follows a convolutional layer (input is flattened first).
    void configure_conv_fc(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);
    // Sets up the matrix multiplication: CpuGemm, or CpuGemmLowpMatrixMultiplyCore for quantized asymmetric types.
    void configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const ActivationLayerInfo &act);

    // Slots of the auxiliary (workspace) tensors exposed through workspace().
    // The GemmTemp* slots are shared with the nested GEMM operators; which of them
    // are used depends on whether the float or the quantized path is taken.
    enum AuxTensorIdx
    {
        AsmGemmWorkspace = 0,
        Pretranspose,
        GemmTemp1, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
        GemmTemp2, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
        GemmTemp3, // Both CpuGemm and CpuGemmLowpMatrixMultiplyCore
        GemmTemp4, // CpuGemmLowpMatrixMultiplyCore only
        GemmTemp5, // CpuGemmLowpMatrixMultiplyCore only
        GemmTemp6, // CpuGemmLowpMatrixMultiplyCore only
        GemmTemp7, // CpuGemmLowpMatrixMultiplyCore only
        TransposedWeights,
        ConvertedWeights,
        FlattenedSrc,
        Count // Number of auxiliary tensor slots
    };

    // Nested operators/kernels; instantiated on demand depending on the configured path.
    std::unique_ptr<CpuFlatten>                      _flatten;           // Flattens a (post-convolution) input into 2D
    std::unique_ptr<CpuConvertFullyConnectedWeights> _convert_weights;   // Converts weights between NCHW/NHWC layouts
    std::unique_ptr<kernels::CpuTransposeKernel>     _transpose_weights; // Transposes weights when transpose_weights is requested
    std::unique_ptr<CpuGemm>                         _mm_gemm;           // Float matrix multiplication
    std::unique_ptr<CpuGemmLowpMatrixMultiplyCore>   _mm_gemmlowp;       // Quantized asymmetric matrix multiplication

    // TensorInfo descriptors for the intermediate tensors backing the aux slots above.
    TensorInfo   _flattened_src;
    TensorInfo   _converted_weights;
    TensorInfo   _reshaped_weights;
    TensorInfo   _trans_weights;
    AuxTensorIdx _trans_weights_idx; // Aux slot holding the weights actually fed to the GEMM (presumably TransposedWeights or ConvertedWeights — confirm in the .cpp)

    experimental::MemoryRequirements _aux_mem; // Aggregated workspace requirements reported by workspace()

    // Flags captured at configure() time and consumed by run()/prepare().
    bool                      _needs_weights_conversion;
    bool                      _needs_weights_reshape;
    bool                      _is_fc_after_conv;        // True when the input comes from a convolutional layer
    bool                      _is_quantized_asymmetric; // True selects the CpuGemmLowpMatrixMultiplyCore path
    bool                      _is_prepared;             // Set once prepare() has run (one-shot weight reshaping)
    bool                      _enable_fast_math;
    bool                      _fixed_format;            // True when a fixed-format (reshaped-weights) kernel is in use
    arm_compute::WeightFormat _weight_format;           // Expected weight format for the fixed-format kernel
};
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_FULLY_CONNECTED_H */