1*c217d954SCole Faust /* 2*c217d954SCole Faust * Copyright (c) 2021-2022 Arm Limited. 3*c217d954SCole Faust * 4*c217d954SCole Faust * SPDX-License-Identifier: MIT 5*c217d954SCole Faust * 6*c217d954SCole Faust * Permission is hereby granted, free of charge, to any person obtaining a copy 7*c217d954SCole Faust * of this software and associated documentation files (the "Software"), to 8*c217d954SCole Faust * deal in the Software without restriction, including without limitation the 9*c217d954SCole Faust * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 10*c217d954SCole Faust * sell copies of the Software, and to permit persons to whom the Software is 11*c217d954SCole Faust * furnished to do so, subject to the following conditions: 12*c217d954SCole Faust * 13*c217d954SCole Faust * The above copyright notice and this permission notice shall be included in all 14*c217d954SCole Faust * copies or substantial portions of the Software. 15*c217d954SCole Faust * 16*c217d954SCole Faust * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17*c217d954SCole Faust * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18*c217d954SCole Faust * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19*c217d954SCole Faust * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20*c217d954SCole Faust * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21*c217d954SCole Faust * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22*c217d954SCole Faust * SOFTWARE. 23*c217d954SCole Faust */ 24*c217d954SCole Faust #ifndef ARM_COMPUTE_CPU_GEMM_CONV2D_H 25*c217d954SCole Faust #define ARM_COMPUTE_CPU_GEMM_CONV2D_H 26*c217d954SCole Faust 27*c217d954SCole Faust #include "arm_compute/core/TensorInfo.h" 28*c217d954SCole Faust #include "arm_compute/core/Types.h" 29*c217d954SCole Faust #include "src/cpu/ICpuOperator.h" 30*c217d954SCole Faust 31*c217d954SCole Faust #include <memory> 32*c217d954SCole Faust 33*c217d954SCole Faust namespace arm_compute 34*c217d954SCole Faust { 35*c217d954SCole Faust namespace cpu 36*c217d954SCole Faust { 37*c217d954SCole Faust class CpuGemm; 38*c217d954SCole Faust class CpuGemmLowpMatrixMultiplyCore; 39*c217d954SCole Faust class CpuGemmLowpOutputStage; 40*c217d954SCole Faust namespace kernels 41*c217d954SCole Faust { 42*c217d954SCole Faust class CpuWeightsReshapeKernel; 43*c217d954SCole Faust class CpuIm2ColKernel; 44*c217d954SCole Faust class CpuCol2ImKernel; 45*c217d954SCole Faust class CpuReshapeKernel; 46*c217d954SCole Faust } // namespace kernels 47*c217d954SCole Faust 48*c217d954SCole Faust /** Basic function to compute the convolution layer. This function calls the following kernels/functions: 49*c217d954SCole Faust * 50*c217d954SCole Faust * -# @ref cpu::kernels::CpuIm2ColKernel 51*c217d954SCole Faust * -# @ref CpuGemm (if the data type is BFLOAT16/FP16/FP32) 52*c217d954SCole Faust * -# @ref CpuGemmLowpMatrixMultiplyCore (if the data type is QASYMM8/QASYMM8_SIGNED) 53*c217d954SCole Faust * -# @ref CpuGemmLowpOutputStage (if the data type is QASYMM8/QASYMM8_SIGNED) 54*c217d954SCole Faust * -# @ref cpu::kernels::CpuCol2ImKernel (if NCHW data layout) 55*c217d954SCole Faust * -# @ref kernels::CpuWeightsReshapeKernel 56*c217d954SCole Faust * 57*c217d954SCole Faust */ 58*c217d954SCole Faust class CpuGemmConv2d : public ICpuOperator 59*c217d954SCole Faust { 60*c217d954SCole Faust public: 61*c217d954SCole Faust /** Constructor */ 62*c217d954SCole Faust CpuGemmConv2d(); 63*c217d954SCole Faust /** Prevent instances of this class from being copied (As this class contains pointers) */ 64*c217d954SCole Faust CpuGemmConv2d(const CpuGemmConv2d &) = delete; 65*c217d954SCole Faust /** Prevent instances of this class from being moved (As this class contains non movable objects) */ 66*c217d954SCole Faust CpuGemmConv2d(CpuGemmConv2d &&) = delete; 67*c217d954SCole Faust /** Prevent instances of this class from being copied (As this class contains pointers) */ 68*c217d954SCole Faust CpuGemmConv2d &operator=(const CpuGemmConv2d &) = delete; 69*c217d954SCole Faust /** Prevent instances of this class from being moved (As this class contains non movable objects) */ 70*c217d954SCole Faust CpuGemmConv2d &operator=(CpuGemmConv2d &&) = delete; 71*c217d954SCole Faust /** Destructor */ 72*c217d954SCole Faust ~CpuGemmConv2d(); 73*c217d954SCole Faust /** Set the input and output tensors. 74*c217d954SCole Faust * 75*c217d954SCole Faust * Valid data layouts: 76*c217d954SCole Faust * - NHWC 77*c217d954SCole Faust * - NCHW 78*c217d954SCole Faust * 79*c217d954SCole Faust * Valid data type configurations: 80*c217d954SCole Faust * |src0 |src1 |src2 |dst | 81*c217d954SCole Faust * |:--------------|:------------------|:--------|:--------------| 82*c217d954SCole Faust * |F16 |F16 |F16 |F16 | 83*c217d954SCole Faust * |F32 |F32 |F32 |F32 | 84*c217d954SCole Faust * |BFLOAT16 |BFLOAT16 |BFLOAT16 |BFLOAT16 | 85*c217d954SCole Faust * |QASYMM8 |QASYMM8 |S32 |QASYMM8 | 86*c217d954SCole Faust * |QASYMM8 |QSYMM8_PER_CHANNEL |S32 |QASYMM8 | 87*c217d954SCole Faust * |QASYMM8_SIGNED |QASYMM8_SIGNED |S32 |QASYMM8_SIGNED | 88*c217d954SCole Faust * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32 |QASYMM8_SIGNED | 89*c217d954SCole Faust * 90*c217d954SCole Faust * @param[in] src Source tensor info. 3 lower dimensions represent a single input [width, height, IFM], 91*c217d954SCole Faust * while every optional dimension from 4 and above represent a batch of inputs. 92*c217d954SCole Faust * Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32. 93*c217d954SCole Faust * @param[in] weights Weights tensor info. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. 94*c217d954SCole Faust * Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32. 95*c217d954SCole Faust * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 96*c217d954SCole Faust * Data type supported: Should match @p input data type, except for input of QASYMM8/QASYMM8_SIGNED type where biases should be of S32 type. 97*c217d954SCole Faust * @param[out] dst Destination tensor info. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs. 98*c217d954SCole Faust * Data types supported: Same as @p input. 99*c217d954SCole Faust * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 100*c217d954SCole Faust * @param[in] weights_info Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer the weights 101*c217d954SCole Faust * tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input. 102*c217d954SCole Faust * @param[in] dilation (Optional) Dilation, in elements, across x and y. Defaults to (1, 1). 103*c217d954SCole Faust * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported. 104*c217d954SCole Faust * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation 105*c217d954SCole Faust * available which may introduce a drop of accuracy as well. Default is false 106*c217d954SCole Faust * @param[in] num_groups (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported 107*c217d954SCole Faust */ 108*c217d954SCole Faust void configure(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *dst, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(), 109*c217d954SCole Faust const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false, unsigned int num_groups = 1); 110*c217d954SCole Faust /** Static function to check if given info will lead to a valid configuration 111*c217d954SCole Faust * 112*c217d954SCole Faust * Similar to CpuGemmConvolution::configure() 113*c217d954SCole Faust * 114*c217d954SCole Faust * @return a status 115*c217d954SCole Faust */ 116*c217d954SCole Faust static Status validate(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info, 117*c217d954SCole Faust const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), 118*c217d954SCole Faust bool enable_fast_math = false, unsigned int num_groups = 1); 119*c217d954SCole Faust 120*c217d954SCole Faust /** Indicates whether or not there is an optimal assembly implementation that can be used to process the given parameters. 121*c217d954SCole Faust * 122*c217d954SCole Faust * The paramter list is the same as @ref NEGEMMConvolutionLayer::has_opt_impl 123*c217d954SCole Faust * 124*c217d954SCole Faust * @return a status. 125*c217d954SCole Faust */ 126*c217d954SCole Faust static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, 127*c217d954SCole Faust const PadStrideInfo &conv_info, 128*c217d954SCole Faust const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), 129*c217d954SCole Faust const bool enable_fast_math = false); 130*c217d954SCole Faust 131*c217d954SCole Faust // Inherited methods overridden: 132*c217d954SCole Faust void run(ITensorPack &tensors) override; 133*c217d954SCole Faust void prepare(ITensorPack &tensors) override; 134*c217d954SCole Faust experimental::MemoryRequirements workspace() const override; 135*c217d954SCole Faust 136*c217d954SCole Faust private: 137*c217d954SCole Faust /** Configures the appropriate matrix multiply routine 138*c217d954SCole Faust * 139*c217d954SCole Faust * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32. 140*c217d954SCole Faust * @param[in] weights Weights tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32. 141*c217d954SCole Faust * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 142*c217d954SCole Faust * Data type supported: Should match @p input data type, except for input of QASYMM8/QASYMM8_SIGNED type where biases should be of S32 type. 143*c217d954SCole Faust * @param[out] dst Output tensor info. Data types supported: Same as @p input, 144*c217d954SCole Faust * except for input of QASYMM8/QASYMM8_SIGNED type where output should be of S32 type. 145*c217d954SCole Faust * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported. 146*c217d954SCole Faust * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation 147*c217d954SCole Faust * available which may introduce a drop of accuracy as well. Default is false 148*c217d954SCole Faust * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1) 149*c217d954SCole Faust * @param[in] fixed_format (Optional) Select GEMM execution with variable weights. 150*c217d954SCole Faust * @param[in] weight_format (Optional) The layout to be used for the weights tensor when running GEMM with variable weights. 151*c217d954SCole Faust */ 152*c217d954SCole Faust void configure_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, ITensorInfo *output, const ActivationLayerInfo &act_info = ActivationLayerInfo(), 153*c217d954SCole Faust bool enable_fast_math = false, int gemm_3d_depth = 1, bool fixed_format = false, arm_compute::WeightFormat weight_format = arm_compute::WeightFormat::UNSPECIFIED); 154*c217d954SCole Faust /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer matrix multiply routines 155*c217d954SCole Faust * 156*c217d954SCole Faust * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32. 157*c217d954SCole Faust * @param[in] weights Weights tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32. 158*c217d954SCole Faust * @param[in] biases Biases tensor info. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. 159*c217d954SCole Faust * Data type supported: Should match @p input data type, except for input of QASYMM8/QASYMM8_SIGNED type where biases should be of S32 type. 160*c217d954SCole Faust * @param[in] dst Output tensor info. Data types supported: Same as @p input, 161*c217d954SCole Faust * except for input of QASYMM8/QASYMM8_SIGNED type where output should be of S32 type. 162*c217d954SCole Faust * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported. 163*c217d954SCole Faust * @param[in] enable_fast_math (Optional) Enable fast math computation. In case this flag were set, the function could dispatch the fastest implementation 164*c217d954SCole Faust * available which may introduce a drop of accuracy as well. Default is false 165*c217d954SCole Faust * @param[in] gemm_3d_depth (Optional) Depth of GEMM 3D (Defaults to 1) 166*c217d954SCole Faust * @param[in] skip_im2col (Optional) Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout. (Default to false) 167*c217d954SCole Faust * @param[in] fixed_format (Optional) Select GEMM execution with variable weights. 168*c217d954SCole Faust * @param[in] weight_format (Optional) The layout to be used for the weights tensor when running GEMM with variable weights. 169*c217d954SCole Faust * 170*c217d954SCole Faust * @return a status 171*c217d954SCole Faust */ 172*c217d954SCole Faust static Status validate_mm(const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst, const ActivationLayerInfo &act_info = ActivationLayerInfo(), 173*c217d954SCole Faust bool enable_fast_math = false, int gemm_3d_depth = 1, bool skip_im2col = false, bool fixed_format = false, arm_compute::WeightFormat weight_format = arm_compute::WeightFormat::UNSPECIFIED); 174*c217d954SCole Faust /** Static function to check if GEMM3D is supported in @ref NEGEMM or in @ref CpuGemmMLowpMatrixMultiplyCore 175*c217d954SCole Faust * 176*c217d954SCole Faust * @param[in] src Input tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32. 177*c217d954SCole Faust * @param[in] weights Weights tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32. 178*c217d954SCole Faust * @param[in] act_info Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported. 179*c217d954SCole Faust * @param[in] gemm_3d_depth Depth of GEMM 3D 180*c217d954SCole Faust * @param[in] skip_im2col Flag which specifies if im2col has to be skipped. i.e. 1x1 convolution with NHWC data layout 181*c217d954SCole Faust * 182*c217d954SCole Faust * @return a status 183*c217d954SCole Faust */ 184*c217d954SCole Faust static Status validate_gemm3d(const ITensorInfo *src, const ITensorInfo *weights, const ActivationLayerInfo &act_info, int gemm_3d_depth, bool skip_im2col); 185*c217d954SCole Faust 186*c217d954SCole Faust struct SkipInfo 187*c217d954SCole Faust { 188*c217d954SCole Faust bool skip_im2col; 189*c217d954SCole Faust bool skip_col2im; 190*c217d954SCole Faust }; 191*c217d954SCole Faust 192*c217d954SCole Faust /** Static function to provide skip_im2col and skip_col2im information. 193*c217d954SCole Faust * 194*c217d954SCole Faust * @param[in] src Input tensor info. 195*c217d954SCole Faust * @param[in] weights Weights tensor info. 196*c217d954SCole Faust * @param[in] conv_info Contains padding and stride information described in @ref PadStrideInfo. 197*c217d954SCole Faust * @param[in] dilation Dilation, in elements, across x and y. 198*c217d954SCole Faust * @param[in] act_info Activation layer information in case of a fused activation. 199*c217d954SCole Faust * 200*c217d954SCole Faust * @return a SkipInfo instance. 201*c217d954SCole Faust */ 202*c217d954SCole Faust static SkipInfo skip_im_col_info(const ITensorInfo *src, const ITensorInfo *weights, const PadStrideInfo &conv_info, 203*c217d954SCole Faust const Size2D &dilation, const ActivationLayerInfo &act_info); 204*c217d954SCole Faust 205*c217d954SCole Faust /** Indicates if the convolution executes in variable weights mode. 206*c217d954SCole Faust * 207*c217d954SCole Faust * Similar to @ref CpuGemm::isVarWeightsKernel 208*c217d954SCole Faust */ 209*c217d954SCole Faust bool isVarWeightsKernel() const; 210*c217d954SCole Faust enum AuxTensorIdx 211*c217d954SCole Faust { 212*c217d954SCole Faust // CpuGemmLowpMatrixMultiplyCore has up to 8 internal tensors 213*c217d954SCole Faust Im2ColOutput = 9, 214*c217d954SCole Faust WeightsReshaped, 215*c217d954SCole Faust GemmOutput, 216*c217d954SCole Faust Count 217*c217d954SCole Faust }; 218*c217d954SCole Faust 219*c217d954SCole Faust std::unique_ptr<kernels::CpuWeightsReshapeKernel> _weights_reshape_kernel; 220*c217d954SCole Faust std::unique_ptr<cpu::kernels::CpuIm2ColKernel> _im2col_kernel; 221*c217d954SCole Faust std::unique_ptr<CpuGemm> _mm_gemm; 222*c217d954SCole Faust std::unique_ptr<CpuGemmLowpMatrixMultiplyCore> _mm_gemmlowp; 223*c217d954SCole Faust std::unique_ptr<kernels::CpuCol2ImKernel> _col2im_kernel; 224*c217d954SCole Faust std::unique_ptr<kernels::CpuReshapeKernel> _reshape_kernel; 225*c217d954SCole Faust 226*c217d954SCole Faust TensorInfo _im2col_output; 227*c217d954SCole Faust TensorInfo _weights_reshaped; 228*c217d954SCole Faust TensorInfo _gemm_output; 229*c217d954SCole Faust TensorInfo _gemm_output_3d; 230*c217d954SCole Faust 231*c217d954SCole Faust DataLayout _data_layout; 232*c217d954SCole Faust 233*c217d954SCole Faust bool _skip_im2col; 234*c217d954SCole Faust bool _skip_col2im; 235*c217d954SCole Faust bool _is_quantized; 236*c217d954SCole Faust bool _is_prepared; 237*c217d954SCole Faust 238*c217d954SCole Faust experimental::MemoryRequirements _aux_mem{ Count }; 239*c217d954SCole Faust }; 240*c217d954SCole Faust } // namespace cpu 241*c217d954SCole Faust } // namespace arm_compute 242*c217d954SCole Faust #endif /* ARM_COMPUTE_CPU_GEMM_CONV2D_H */ 243