/*
 * Copyright (c) 2017-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H
#define ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H

#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"

#include "arm_compute/runtime/NEON/functions/NETranspose.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
namespace weights_transformations
{
/** Basic function to manage the reshaped weights generated from @ref NETranspose */
class NEFullyConnectedLayerReshapeWeightsManaged : public ITransformWeights
{
public:
    void run() override
    {
        _output.allocator()->allocate();
        _func.run();
        _reshape_run = true;
    }

    void release() override
    {
        _output.allocator()->free();
    }

    ITensor *get_weights() override
    {
        return &_output;
    }

    uint32_t uid() override
    {
        return _uid;
    }

    void configure(const ITensor *input)
    {
        _func.configure(input, &_output);
    }

private:
    static constexpr uint32_t _uid = 0x0;
    Tensor                    _output{};
    NETranspose               _func{};
};
} // namespace weights_transformations

/** Basic function to compute a Fully Connected layer. This function calls the following kernels:
 * -# @ref cpu::kernels::CpuIm2ColKernel (called when the input comes from a convolutional layer)
 * -# @ref NETranspose (if @p are_weights_reshaped is set to false and transpose_weights is set to true) (called once)
 * -# @ref NEGEMM or @ref NEGEMMLowpMatrixMultiplyCore (if quantized asymmetric)
 * -# @ref cpu::kernels::CpuGemmMatrixAdditionKernel or @ref NEGEMMLowpOutputStage (if quantized asymmetric) (if @p biases is not equal to nullptr)
 *
 * @note The fully connected layer accepts "weights" tensors only with 2 dimensions.
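 *
 * A minimal usage sketch (illustrative only: a 128-element F32 input mapped to 64 outputs,
 * with the default @ref FullyConnectedLayerInfo; shapes and data type are assumptions, not
 * requirements of this function):
 * @code
 * Tensor src{}, weights{}, bias{}, dst{};
 * src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
 * weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32));
 * bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
 *
 * NEFullyConnectedLayer fc{};
 * fc.configure(&src, &weights, &bias, &dst);
 *
 * // Backing memory is allocated after configuration; fill src/weights/bias before running.
 * src.allocator()->allocate();
 * weights.allocator()->allocate();
 * bias.allocator()->allocate();
 * dst.allocator()->allocate();
 * fc.run();
 * @endcode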
 */
class NEFullyConnectedLayer : public IFunction
{
public:
    /** Constructor */
    NEFullyConnectedLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEFullyConnectedLayer(const NEFullyConnectedLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains pointers) */
    NEFullyConnectedLayer(NEFullyConnectedLayer &&) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEFullyConnectedLayer &operator=(const NEFullyConnectedLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains pointers) */
    NEFullyConnectedLayer &operator=(NEFullyConnectedLayer &&) = delete;
    /** Default destructor */
    ~NEFullyConnectedLayer();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1           |src2 |dst            |
     * |:--------------|:--------------|:----|:--------------|
     * |F16            |F16            |F16  |F16            |
     * |F32            |F32            |F32  |F32            |
     * |QASYMM8        |QASYMM8        |S32  |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32  |QASYMM8_SIGNED |
     *
     * @param[in]  input   Source tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights Weights tensor. The weights must be 2 dimensional.
     *                     If this function is called after a Convolution Layer, the (transposed) weights will have as many rows as the product of the first 3 input's dimensions.
     *                     If it is called after another FullyConnected Layer, the (transposed) weights will have as many rows as the input's first dimension.
     *                     Data type supported: Same as @p input.
     * @param[in]  biases  Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED.
     * @param[out] output  Destination tensor. Its shape should be equal to the output of a matrix multiplication between:
     *                     - The output of im2col on the input and the (transposed) 2D weights, if the function is called after a Convolution Layer
     *                     - The input tensor and the (transposed) 2D weights, if the function is called after another FullyConnected Layer.
     *                     Data type supported: Same as @p input.
     * @param[in]  fc_info      (Optional) Fully connected layer additional info
     * @param[in]  weights_info (Optional) Stores the necessary compute information when weights are already reshaped
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output,
                   FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(), const WeightsInfo &weights_info = WeightsInfo());
    /** Static function to check if given info will lead to a valid configuration of @ref NEFullyConnectedLayer
     *
     * Similar to @ref NEFullyConnectedLayer::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output,
                           FullyConnectedLayerInfo fc_info = FullyConnectedLayerInfo(), const WeightsInfo &weights_info = WeightsInfo());

    /** Static function that queries whether a fixed-format kernel exists for a given problem description
     *
     * @param[out] expected_weight_format Format in which the weights should be for the found fixed-format kernel
     * @param[in]  input                  Source tensor
     * @param[in]  weights                Weights tensor.
     * @param[in]  biases                 Bias tensor. Can be nullptr. Data type supported: Same as @p weights, S32 if @p weights is QASYMM8/QASYMM8_SIGNED.
     * @param[in]  output                 Destination tensor
     * @param[in]  fc_info                Fully connected layer additional info
     * @param[in]  weights_info           Describes weights shape
     *
     * @return a status
     */
    static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *input, const ITensorInfo *weights,
                               const ITensorInfo *biases, const ITensorInfo *output, const FullyConnectedLayerInfo &fc_info, const WeightsInfo &weights_info);

    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEFULLYCONNECTEDLAYER_H */
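/*
 * Usage note (illustrative sketch, not part of the library API surface): validate() can vet
 * a configuration up front without allocating anything, and has_opt_impl() reports whether a
 * fixed-format kernel exists and which weight layout it expects. The shapes below are
 * hypothetical, and the exact WeightsInfo setup required for fixed-format queries may vary
 * between library versions:
 *
 *   TensorInfo src_info(TensorShape(128U), 1, DataType::F32);
 *   TensorInfo wei_info(TensorShape(128U, 64U), 1, DataType::F32);
 *   TensorInfo dst_info(TensorShape(64U), 1, DataType::F32);
 *
 *   Status st = NEFullyConnectedLayer::validate(&src_info, &wei_info, nullptr, &dst_info);
 *
 *   arm_compute::WeightFormat expected_wf = arm_compute::WeightFormat::ANY;
 *   Status opt = NEFullyConnectedLayer::has_opt_impl(expected_wf, &src_info, &wei_info, nullptr, &dst_info,
 *                                                    FullyConnectedLayerInfo(), WeightsInfo());
 */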