/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H
#define ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H

#include "arm_compute/runtime/IFunction.h"

#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/IWeightsManager.h"
#include "arm_compute/runtime/MemoryGroup.h"

#include <memory>

namespace arm_compute
{
class ITensor;
class ITensorInfo;

/** Basic function to compute the convolution layer. This function calls the following kernels/functions:
 *
 * -# @ref cpu::CpuGemmConv2d
 *
 */
class NEGEMMConvolutionLayer : public IFunction
{
public:
    /** Constructor */
    NEGEMMConvolutionLayer(const std::shared_ptr<IMemoryManager> &memory_manager = nullptr, IWeightsManager *weights_manager = nullptr);
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMMConvolutionLayer(const NEGEMMConvolutionLayer &) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEGEMMConvolutionLayer(NEGEMMConvolutionLayer &&) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    NEGEMMConvolutionLayer &operator=(const NEGEMMConvolutionLayer &) = delete;
    /** Prevent instances of this class from being moved (as this class contains non-movable objects) */
    NEGEMMConvolutionLayer &operator=(NEGEMMConvolutionLayer &&) = delete;
    /** Default destructor */
    ~NEGEMMConvolutionLayer();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src0           |src1               |src2     |dst            |
     * |:--------------|:------------------|:--------|:--------------|
     * |F16            |F16                |F16      |F16            |
     * |F32            |F32                |F32      |F32            |
     * |BFLOAT16       |BFLOAT16           |BFLOAT16 |BFLOAT16       |
     * |QASYMM8        |QASYMM8            |S32      |QASYMM8        |
     * |QASYMM8        |QSYMM8_PER_CHANNEL |S32      |QASYMM8        |
     * |QASYMM8_SIGNED |QASYMM8_SIGNED     |S32      |QASYMM8_SIGNED |
     * |QASYMM8_SIGNED |QSYMM8_PER_CHANNEL |S32      |QASYMM8_SIGNED |
     *
     * @param[in]  input            Source tensor. The 3 lower dimensions represent a single input [width, height, IFM],
     *                              while every optional dimension from 4 and above represents a batch of inputs.
     *                              Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32.
     * @param[in]  weights          Weights tensor. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                              Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32.
     * @param[in]  biases           Biases tensor. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM].
     *                              Data type supported: Should match @p input data type, except for input of QASYMM8/QASYMM8_SIGNED type, where biases should be of S32 type.
     * @param[out] output           Destination tensor. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                              Data types supported: Same as @p input.
     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                              tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in]  dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
     * @param[in]  enable_fast_math (Optional) Enable fast math computation. When enabled, the function may dispatch the fastest implementation
     *                              available, which may also introduce a drop in accuracy. Defaults to false.
     * @param[in]  num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported.
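     *
     * A minimal usage sketch is shown below; the tensor shapes, data type and pad/stride values are
     * illustrative assumptions, not requirements of this interface, and the usual arm_compute runtime
     * headers (e.g. arm_compute/runtime/Tensor.h) are assumed to be included:
     *
     *     Tensor src{}, weights{}, biases{}, dst{};
     *     src.allocator()->init(TensorInfo(TensorShape(224U, 224U, 3U), 1, DataType::F32));
     *     weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 3U, 32U), 1, DataType::F32));
     *     biases.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
     *     dst.allocator()->init(TensorInfo(TensorShape(222U, 222U, 32U), 1, DataType::F32));
     *
     *     NEGEMMConvolutionLayer conv{};
     *     conv.configure(&src, &weights, &biases, &dst, PadStrideInfo(1, 1, 0, 0)); // 3x3 kernel, stride 1, no padding
     *
     *     src.allocator()->allocate();
     *     weights.allocator()->allocate();
     *     biases.allocator()->allocate();
     *     dst.allocator()->allocate();
     *     // ... fill src, weights and biases with data ...
     *     conv.run();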
     */
    void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, const PadStrideInfo &conv_info, const WeightsInfo &weights_info = WeightsInfo(),
                   const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(), bool enable_fast_math = false, unsigned int num_groups = 1);
    /** Static function to check if the given info will lead to a valid configuration of @ref NEGEMMConvolutionLayer
     *
     * @param[in] input            Source tensor info. The 3 lower dimensions represent a single input [width, height, IFM],
     *                             while every optional dimension from 4 and above represents a batch of inputs.
     *                             Data types supported: QASYMM8/QASYMM8_SIGNED/BFLOAT16/F16/F32.
     * @param[in] weights          Weights tensor info. Weights are a 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM].
     *                             Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8_PER_CHANNEL/BFLOAT16/F16/F32.
     * @param[in] biases           Biases tensor info. Shared biases are supported. Biases are a 1D tensor with dimensions [OFM].
     *                             Data type supported: Should match @p input data type, except for input of QASYMM8/QASYMM8_SIGNED type, where biases should be of S32 type.
     * @param[in] output           Destination tensor info. The 3 lower dimensions represent a single output [width, height, OFM], while the rest represent a batch of outputs.
     *                             Data types supported: Same as @p input.
     * @param[in] conv_info        Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in] weights_info     Specifies if the weights tensor has been reshaped with NEWeightsReshapeKernel. If this is not part of the fully connected layer, the weights
     *                             tensor has also been transposed with cpu::kernels::CpuGemmTranspose1xWKernel. Data type supported: Same as @p input.
     * @param[in] dilation         (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in] act_info         (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported.
     * @param[in] enable_fast_math (Optional) Enable fast math computation. When enabled, the function may dispatch the fastest implementation
     *                             available, which may also introduce a drop in accuracy. Defaults to false.
     * @param[in] num_groups       (Optional) Number of groups when performing a grouped convolution. num_groups != 1 is not supported.
     *
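     * A minimal validation sketch is shown below; the shapes, data type and conv_info values are
     * illustrative assumptions rather than part of this interface (TensorInfo, Status and ErrorCode
     * come from the arm_compute core headers):
     *
     *     const TensorInfo src(TensorShape(224U, 224U, 3U), 1, DataType::F32);
     *     const TensorInfo wei(TensorShape(3U, 3U, 3U, 32U), 1, DataType::F32);
     *     const TensorInfo bia(TensorShape(32U), 1, DataType::F32);
     *     const TensorInfo dst(TensorShape(222U, 222U, 32U), 1, DataType::F32);
     *     const Status st = NEGEMMConvolutionLayer::validate(&src, &wei, &bia, &dst, PadStrideInfo(1, 1, 0, 0));
     *     if(st.error_code() == ErrorCode::OK)
     *     {
     *         // The configuration is valid; configure() can be called with matching tensors.
     *     }
     *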
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, const PadStrideInfo &conv_info,
                           const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                           bool enable_fast_math = false, unsigned int num_groups = 1);

    /** Static function to check if there is an optimized version of
     * GEMM available for the input parameters.
     *
     * The method is intended to be used to find out the optimal
     * memory layout to be used for the weights tensor when running
     * variable weights execution.
     *
     * The user can query the database of optimised kernels in
     * arm_gemm by specifying one of the enumerations of
     * arm_compute::WeightFormat in the weight_format field of the input
     * parameter weights_info. In case of success, the method
     * writes the expected format to the output parameter
     * expected_weight_format. The expected_weight_format can then be
     * used in the configure method of the class for retrieving the
     * optimal kernel.
     *
     * Use case one - query for a specific format:
     *
     *     WeightsInfo weights_info(..., arm_compute::WeightFormat::OHWIo4, ...); // Set the value of the input query.
     *     arm_compute::WeightFormat expected_wf;
     *     if (NEGEMMConvolutionLayer::has_opt_impl(expected_wf, ...., weights_info, ...))
     *     {
     *       auto conv = std::make_unique<NEGEMMConvolutionLayer>();
     *       conv->configure(..., weights_info, ...);  // Uses the same WeightFormat the user requested originally, OHWIo4.
     *       conv->run(...);
     *     }
     *
     * Use case two - query for any format that would be optimal for the GEMM to execute:
     *
     *     WeightsInfo weights_info(..., arm_compute::WeightFormat::ANY, ...); // Set the value of the input query.
     *     arm_compute::WeightFormat expected_wf;
     *     if (NEGEMMConvolutionLayer::has_opt_impl(expected_wf, ...., weights_info, ...))
     *     {
     *       auto conv = std::make_unique<NEGEMMConvolutionLayer>();
     *       // ... code to convert the layout of the weights tensor to the layout returned by has_opt_impl
     *       WeightsInfo new_weights_info(..., expected_wf, ...); // Set the value of the WeightFormat returned by has_opt_impl.
     *       conv->configure(..., new_weights_info, ...);
     *       conv->run(...);
     *     }
     *
     * Note that a GEMM configured with a WeightFormat other than
     * UNSPECIFIED will run the GEMM in variable weights mode.
     *
     * @param[out] expected_weight_format The arm_compute::WeightFormat expected by the kernel.
     * @param[in]  src                    Source tensor info.
     * @param[in]  weights                Weights tensor info.
     * @param[in]  biases                 Biases tensor info. Shared biases are supported.
     * @param[in]  dst                    Destination tensor info.
     * @param[in]  conv_info              Contains padding and stride information described in @ref PadStrideInfo.
     * @param[in]  weights_info           (Optional) Specifies additional configuration parameters for the weights of the GEMM computation.
     * @param[in]  dilation               (Optional) Dilation, in elements, across x and y. Defaults to (1, 1).
     * @param[in]  act_info               (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU are supported; no activation (i.e. Linear), which is the default, is also valid.
     * @param[in]  enable_fast_math       (Optional) Enable fast math computation. When enabled, the function may dispatch the fastest implementation
     *                                    available, which may also introduce a drop in accuracy. Defaults to false.
     *
     * @return a Status
     */
    static Status has_opt_impl(arm_compute::WeightFormat &expected_weight_format, const ITensorInfo *src, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *dst,
                               const PadStrideInfo &conv_info,
                               const WeightsInfo &weights_info = WeightsInfo(), const Size2D &dilation = Size2D(1U, 1U), const ActivationLayerInfo &act_info = ActivationLayerInfo(),
                               bool enable_fast_math = false);
    // Inherited methods overridden:
    void run() override;
    void prepare() override;

private:
    struct Impl;
    std::unique_ptr<Impl> _impl;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMMCONVOLUTIONLAYER_H */