/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEBATCHNORMALIZATIONLAYERKERNEL_H
#define ARM_COMPUTE_NEBATCHNORMALIZATIONLAYERKERNEL_H

#include "src/core/NEON/INEKernel.h"

namespace arm_compute
{
// Forward declarations
class ITensor;

/** Interface for the batch normalization layer kernel.
 *
 * For each feature map, the kernel computes out = gamma * (in - mean) / sqrt(var + epsilon) + beta,
 * optionally followed by a fused activation.
 */
class NEBatchNormalizationLayerKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEBatchNormalizationLayerKernel";
    }
    /** Default constructor */
    NEBatchNormalizationLayerKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEBatchNormalizationLayerKernel(const NEBatchNormalizationLayerKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEBatchNormalizationLayerKernel &operator=(const NEBatchNormalizationLayerKernel &) = delete;
    /** Default move constructor */
    NEBatchNormalizationLayerKernel(NEBatchNormalizationLayerKernel &&) = default;
    /** Default move assignment operator */
    NEBatchNormalizationLayerKernel &operator=(NEBatchNormalizationLayerKernel &&) = default;
    /** Default destructor */
    ~NEBatchNormalizationLayerKernel() = default;
    /** Set the input and output tensors.
     *
     * @note If the output tensor is a nullptr, the batch normalization function will be performed in-place
     *
     * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
     *                          3 lower dimensions represent a single input with dimensions [width, height, FM].
     *                          The rest are optional and used for representing batches. Data types supported: F16/F32.
     * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
     * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
     * @param[in]      epsilon  (Optional) Small value to avoid division by zero. Default value is 0.001f.
     * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
     */
    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta = nullptr, const ITensor *gamma = nullptr, float epsilon = 0.001f,
                   ActivationLayerInfo act_info = ActivationLayerInfo());
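    /* A minimal usage sketch (assuming src, mean and var are already configured and allocated F32 Tensor objects,
     * and that the kernel is run through the NEON scheduler):
     *
     *   NEBatchNormalizationLayerKernel bn_kernel;
     *   bn_kernel.configure(&src, nullptr, &mean, &var);       // nullptr output -> in-place; beta = 0, gamma = 1, epsilon = 0.001f
     *   NEScheduler::get().schedule(&bn_kernel, Window::DimY); // split the execution window along the Y dimension
     */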
    /** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayerKernel
     *
     * @param[in] input    Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
     *                     3 lower dimensions represent a single input with dimensions [width, height, FM].
     *                     The rest are optional and used for representing batches. Data types supported: F16/F32.
     * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
     * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
     * @param[in] epsilon  (Optional) Small value to avoid division by zero. Default value is 0.001f.
     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                           const ITensorInfo *mean, const ITensorInfo *var,
                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
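    /* A minimal validation sketch (assuming the same src, mean and var tensors as in the sketch above); the returned
     * Status can be checked before configuring the kernel:
     *
     *   const Status status = NEBatchNormalizationLayerKernel::validate(src.info(), nullptr, mean.info(), var.info());
     *   ARM_COMPUTE_ERROR_THROW_ON(status);
     */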

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Configure execution function in case of non-fused activation **/
    void configure_non_fused();
    /** Configure execution function in case of fused activation **/
    void configure_fused();

    /** Template function to run batch normalization on tensors with NCHW format
     *
     * @tparam T                Specialization data type
     * @tparam fused_activation Boolean that flags if it's a fused activation or not
     * @tparam F                Activation function functor to run
     *
     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     */
    template <typename T, bool fused_activation, typename F>
    void batch_normalization_nchw(const Window &window);
    /** Template function to run batch normalization on tensors with NHWC format
     *
     * @tparam T                Specialization data type
     * @tparam fused_activation Boolean that flags if it's a fused activation or not
     * @tparam F                Activation function functor to run
     *
     * @param[in] window Region on which to execute the kernel. (Must be a valid region of the window returned by window()).
     */
    template <typename T, bool fused_activation, typename F>
    void batch_normalization_nhwc(const Window &window);
    /** Common signature for all the batch normalization functions
     *
     * @param[in] window Region on which to execute the kernel.
     */
    using BatchNormFunctionPtr = void (NEBatchNormalizationLayerKernel::*)(const Window &window);
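    // Note: run() is expected to dispatch to the specialization selected at configure time through this pointer,
    // typically as (this->*_func)(window).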

private:
    BatchNormFunctionPtr _func;
    ITensor             *_input;
    ITensor             *_output;
    const ITensor       *_mean;
    const ITensor       *_var;
    const ITensor       *_gamma;
    const ITensor       *_beta;
    float                _epsilon;
    ActivationLayerInfo  _act_info;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_NEBATCHNORMALIZATIONLAYERKERNEL_H */