xref: /aosp_15_r20/external/ComputeLibrary/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h (revision c217d954acce2dbc11938adb493fc0abd69584f3)
1 /*
2  * Copyright (c) 2019-2021 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #ifndef ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H
25 #define ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H
26 
27 #include "arm_compute/core/Error.h"
28 #include "arm_compute/runtime/CL/CLTensor.h"
29 #include "arm_compute/runtime/IFunction.h"
30 
31 #include <memory>
32 
33 namespace arm_compute
34 {
35 class CLCompileContext;
36 class ICLTensor;
37 class ITensorInfo;
38 class ICLKernel;
39 class CLRuntimeContext;
40 
/** Basic function to perform an instance normalization.
 *
 * This function runs the following kernels:
 * -# @ref CLInstanceNormalizationLayerKernel
 */
class CLInstanceNormalizationLayer : public IFunction
{
public:
    /** Constructor
     *
     * @param[in] ctx (Optional) Runtime context to be used by the function
     */
    CLInstanceNormalizationLayer(CLRuntimeContext *ctx = nullptr);

    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLInstanceNormalizationLayer(const CLInstanceNormalizationLayer &) = delete;
    /** Default move constructor */
    CLInstanceNormalizationLayer(CLInstanceNormalizationLayer &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLInstanceNormalizationLayer &operator=(const CLInstanceNormalizationLayer &) = delete;
    /** Default move assignment operator */
    CLInstanceNormalizationLayer &operator=(CLInstanceNormalizationLayer &&) = default;
    /** Default destructor
     *
     * Declared here and defined out-of-line: the kernel members are held through
     * std::unique_ptr<ICLKernel> while ICLKernel is only forward-declared in this
     * header, so the destructor must be emitted where the type is complete.
     */
    ~CLInstanceNormalizationLayer();

    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src      |dst       |
     * |:--------|:---------|
     * |F16      |F16       |
     * |F32      |F32       |
     *
     * @param[in, out] input               Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
     *                                     Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[out]     output              Destination tensor. Data types and data layouts supported: same as @p input.
     * @param[in]      gamma               (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in]      beta                (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in]      epsilon             (Optional) Lower bound value for the normalization. Defaults to 1e-12
     * @param[in]      use_mixed_precision (Optional) Use mixed precision in case of FP16 execution
     */
    void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true);
    /** Set the input and output tensors.
     *
     * Identical to the overload above, but runs on a caller-supplied compile context.
     * Valid data layouts and data type configurations: same as
     * @ref CLInstanceNormalizationLayer::configure(ICLTensor *, ICLTensor *, float, float, float, bool).
     *
     * @param[in]      compile_context     The compile context to be used.
     * @param[in, out] input               Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
     *                                     Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[out]     output              Destination tensor. Data types and data layouts supported: same as @p input.
     * @param[in]      gamma               (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in]      beta                (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in]      epsilon             (Optional) Lower bound value for the normalization. Defaults to 1e-12
     * @param[in]      use_mixed_precision (Optional) Use mixed precision in case of FP16 execution
     */
    void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true);

    /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
     *
     * @param[in] input               Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[in] output              Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] gamma               (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in] beta                (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in] epsilon             (Optional) Lower bound value for the normalization. Defaults to 1e-12
     * @param[in] use_mixed_precision (Optional) Use mixed precision in case of FP16 execution
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f, bool use_mixed_precision = true);
    // Inherited from IFunction; runs the configured kernels on the CL queue.
    void run() override;

private:
    std::unique_ptr<ICLKernel> _inst_norm_kernel; /**< Instance normalization kernel */
    std::unique_ptr<ICLKernel> _mean_var_kernel;  /**< Mean/variance computation kernel (per its name — see the .cpp for exact role) */
    CLTensor                   _mean_var_tensor;  /**< Auxiliary tensor, presumably holding intermediate mean/variance statistics — confirm against the implementation */
    CLRuntimeContext          *_ctx; /**< Context to use */
};
120 } // namespace arm_compute
121 #endif /* ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H */
122