/*
 * Copyright (c) 2019-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H
#define ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H

#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;
class NEInstanceNormalizationLayerKernel;

/** Basic function to perform an Instance normalization.
 *
 * This function runs the following kernels:
 * -# @ref NEInstanceNormalizationLayerKernel
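 *
 * A minimal usage sketch is shown below. The tensor shape, data type, and the explicit
 * gamma/beta/epsilon arguments are illustrative assumptions only:
 *
 * @code
 * Tensor src;
 * Tensor dst;
 * src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U, 2U), 1, DataType::F32));
 * dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 3U, 2U), 1, DataType::F32));
 *
 * NEInstanceNormalizationLayer norm;
 * norm.configure(&src, &dst, 1.0f, 0.0f, 1e-12f); // gamma, beta, epsilon
 *
 * src.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill src with data ...
 * norm.run();
 * @endcode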
 */
class NEInstanceNormalizationLayer : public IFunction
{
public:
    /** Constructor */
    NEInstanceNormalizationLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayer(const NEInstanceNormalizationLayer &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEInstanceNormalizationLayer &operator=(const NEInstanceNormalizationLayer &) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEInstanceNormalizationLayer(NEInstanceNormalizationLayer &&) = delete;
    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
    NEInstanceNormalizationLayer &operator=(NEInstanceNormalizationLayer &&) = delete;
    /** Default destructor */
    ~NEInstanceNormalizationLayer();
    /** Set the input and output tensors.
     *
     * Valid data layouts:
     * - NHWC
     * - NCHW
     *
     * Valid data type configurations:
     * |src      |dst       |
     * |:--------|:---------|
     * |F16      |F16       |
     * |F32      |F32       |
     *
     * @param[in, out] input   Source tensor. If @p output is nullptr, this tensor will store the result of the normalization.
     *                         Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in]      epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
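     *
     * A minimal in-place sketch (the shape and data type below are illustrative assumptions);
     * passing nullptr as @p output writes the normalized result back into @p input:
     *
     * @code
     * Tensor src;
     * src.allocator()->init(TensorInfo(TensorShape(8U, 8U, 4U), 1, DataType::F32));
     *
     * NEInstanceNormalizationLayer norm;
     * norm.configure(&src, nullptr); // in-place, using default gamma = 1.0f, beta = 0.0f, epsilon = 1e-12f
     *
     * src.allocator()->allocate();
     * // ... fill src with data ...
     * norm.run();
     * @endcode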
     */
    void configure(ITensor *input, ITensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);

    /** Static function to check if given info will lead to a valid configuration of @ref NEInstanceNormalizationLayer.
     *
     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
     * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
     *
     * @return a status
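     *
     * A minimal up-front check (the tensor infos below are illustrative assumptions):
     *
     * @code
     * TensorInfo src_info(TensorShape(16U, 16U, 3U, 2U), 1, DataType::F32);
     * TensorInfo dst_info(TensorShape(16U, 16U, 3U, 2U), 1, DataType::F32);
     *
     * Status status = NEInstanceNormalizationLayer::validate(&src_info, &dst_info);
     * // status.error_code() == ErrorCode::OK when the configuration is valid
     * @endcode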
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);

    // Inherited methods overridden:
    void run() override;

private:
    MemoryGroup                                         _memory_group;
    std::unique_ptr<NEInstanceNormalizationLayerKernel> _normalization_kernel;
    bool                                                _is_nchw;
    NEPermute                                           _permute_input;
    NEPermute                                           _permute_output;
    Tensor                                              _permuted_input;
    Tensor                                              _permuted_output;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYER_H */