/*
 * Copyright (c) 2018-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NECONVOLUTIONLAYER_H
#define ARM_COMPUTE_NECONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/ITensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/MemoryGroup.h"

#include <memory>

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Basic function to simulate a convolution layer. This function calls one of the following functions:
 * -# @ref cpu::CpuGemm           (executed only in case GEMM is required for the operation)
 * -# @ref cpu::CpuWinogradConv2d (executed only in case Winograd is required for the operation)
 * -# @ref cpu::CpuDirectConv2d   (executed only in case Direct Convolution is required for the operation)
 * -# @ref NEFFTConvolutionLayer  (executed only in case FFT is required for the operation)
 *
 * The function selects one of the algorithms mentioned above based on:
 *      - The size of the kernel
 *      - The number of input/output feature maps
 *      - The amount of memory needed
 *
 * Generally, GEMM-based convolution is executed when neither Winograd, FFT nor Direct convolution can be performed.
 *
 * FP32 Algorithm| Filter Size                                        |   Input/Output feature maps                 |
 * --------------|----------------------------------------------------|---------------------------------------------|
 * Winograd      | 3x3 1x3 3x1 5x1 1x5 5x5 (fast maths) 7x1 1x7       |  Number of input channels greater than 3    |
 * FFT           | Square kernels larger than 9x9                     |  Input feature maps > Output feature maps   |
 * DirectConv    | 9x9                                                |                                             |
 * GEMM          | Any size                                           |                                             |
 *
 * Winograd 5x5 requires fast maths enabled.
 *
 * FP16 Algorithm| Filter Size      |
 * --------------|------------------|
 * Winograd      | Not supported    |
 * FFT           | Not supported    |
 * DirectConv    | 9x9              |
 * GEMM          | Any size         |
 *
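 * A minimal usage sketch (the tensor shapes, data type and convolution parameters
 * below are illustrative assumptions, not requirements of the function):
 *
 * @code
 * // Declare the tensors taking part in the convolution
 * Tensor input{}, weights{}, biases{}, output{};
 * input.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
 * weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32)); // 3x3 kernel, 3 IFM, 16 OFM
 * biases.allocator()->init(TensorInfo(TensorShape(16U), 1, DataType::F32));
 * output.allocator()->init(TensorInfo(TensorShape(224U, 224U, 16U), 1, DataType::F32));  // stride 1, pad 1 preserves width/height
 *
 * // Configure the function; the algorithm (GEMM, Winograd, direct, FFT) is selected internally
 * NEConvolutionLayer conv{};
 * conv.configure(&input, &weights, &biases, &output, PadStrideInfo(1, 1, 1, 1));
 *
 * // Allocate backing memory, fill input/weights/biases, then execute
 * input.allocator()->allocate();
 * weights.allocator()->allocate();
 * biases.allocator()->allocate();
 * output.allocator()->allocate();
 * conv.run();
 * @endcode
 *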
 */
class NEConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEConvolutionLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEConvolutionLayer(const NEConvolutionLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEConvolutionLayer &operator=(const NEConvolutionLayer &) = delete;
    /** Default move constructor */
    NEConvolutionLayer(NEConvolutionLayer &&) = default;
    /** Default move assignment operator */
    NEConvolutionLayer &operator=(NEConvolutionLayer &&) = default;
    /** Default destructor */
    ~NEConvolutionLayer();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1               |src2   |dst            |
     * |:--------------|:------------------|:------|:--------------|
     * |F16            |F16                |F16    |F16            |
     * |F32            |F32                |F32    |F32            |
     * |QASYMM8        |QASYMM8            |S32    |QASYMM8        |
     * |QASYMM8        |QSYMM8_PER_CHANNEL |S32    |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32    |QASYMM8_SIGNED |
     * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32    |QASYMM8_SIGNED |
     *
     * @param[in]  input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                              Data type supported: Same as @p input; can also be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
     * @param[in]  biases           Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                              Data type supported: Same as @p input, except for input of QASYMM8/QASYMM8_SIGNED type, where biases should be of S32 type.
     * @param[out] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                              tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                              available, which may introduce a drop in accuracy. Defaults to false.
     * @param[in]  num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported.
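     *
     * A minimal quantized sketch (the shapes and quantization parameters below are
     * illustrative assumptions, not requirements of the function):
     *
     * @code
     * Tensor src{}, wei{}, bia{}, dst{};
     * src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
     * wei.allocator()->init(TensorInfo(TensorShape(3U, 3U, 8U, 4U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 0)));
     * bia.allocator()->init(TensorInfo(TensorShape(4U), 1, DataType::S32)); // S32 biases for quantized inputs
     * dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 4U), 1, DataType::QASYMM8, QuantizationInfo(1.f, 0)));
     *
     * NEConvolutionLayer conv{};
     * conv.configure(&src, &wei, &bia, &dst, PadStrideInfo(1, 1, 1, 1)); // 3x3 kernel, stride 1, pad 1
     * @endcode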
     */
    void configure(ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false, unsigned int num_groups = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayer
     *
     * @param[in] input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                             Data type supported: Same as @p input; can also be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] biases           Biases tensor. Shared biases supported. Biases are a 1D tensor with dimensions [OFM].
     *                             Data type supported: Same as @p input, except for input of QASYMM8/QASYMM8_SIGNED type, where biases should be of S32 type.
     * @param[in] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                             tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                             available, which may introduce a drop in accuracy. Defaults to false.
     * @param[in] num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported.
     *
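     * A minimal sketch of the intended use (tensor shapes below are illustrative assumptions):
     *
     * @code
     * const TensorInfo src(TensorShape(224U, 224U, 3U), 1, DataType::F32);
     * const TensorInfo wei(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32);
     * const TensorInfo bia(TensorShape(16U), 1, DataType::F32);
     * const TensorInfo dst(TensorShape(224U, 224U, 16U), 1, DataType::F32);
     *
     * // Check the configuration before allocating any tensor memory
     * const Status st = NEConvolutionLayer::validate(&src, &wei, &bia, &dst, PadStrideInfo(1, 1, 1, 1));
     * if(st.error_code() != ErrorCode::OK)
     * {
     *     // Configuration is not supported; st.error_description() explains why
     * }
     * @endcode
     *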
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false,
                           unsigned int num_groups = 1);
    /** Static function to return the convolution method that @ref NEConvolutionLayer would use for the given info
     *
     * @param[in] input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
     * @param[in] weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                             Data type supported: Same as @p input; can also be QSYMM8_PER_CHANNEL if input is QASYMM8/QASYMM8_SIGNED.
     * @param[in] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                             tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. If this flag is set, the function may dispatch the fastest implementation
     *                             available, which may introduce a drop in accuracy. Defaults to false.
     *
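     * A minimal sketch of the intended use (tensor shapes below are illustrative assumptions):
     *
     * @code
     * const TensorInfo src(TensorShape(224U, 224U, 3U), 1, DataType::F32);
     * const TensorInfo wei(TensorShape(3U, 3U, 3U, 16U), 1, DataType::F32);
     * const TensorInfo dst(TensorShape(224U, 224U, 16U), 1, DataType::F32);
     *
     * // Query which algorithm would be dispatched, without configuring anything
     * const ConvolutionMethod method = NEConvolutionLayer::get_convolution_method(&src, &wei, &dst, PadStrideInfo(1, 1, 1, 1));
     * // method is one of ConvolutionMethod::GEMM, ::WINOGRAD, ::DIRECT or ::FFT
     * @endcode
     *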
     * @return the Convolution Method Hint
     */
    static ConvolutionMethod get_convolution_method(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *output, const PadStrideInfo &conv_info,
                                                    const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false);
    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NECONVOLUTIONLAYER_H */