/*
 * Copyright (c) 2019-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/common/Registrars.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
#include "src/cpu/kernels/instancenorm/list.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
struct InstanceNormSelectorData
{
    DataType dt;
};

using InstanceNormSelctorPtr = std::add_pointer<bool(const InstanceNormSelectorData &data)>::type;
using InstanceNormUKernelPtr = std::add_pointer<void(ITensor *input, ITensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision, const Window &window)>::type;

struct InstanceNormKernel
{
    const char                  *name;
    const InstanceNormSelctorPtr is_selected;
    InstanceNormUKernelPtr       ukernel;
};

static const InstanceNormKernel available_kernels[] =
{
    {
        "fp32_neon_instancenorm",
        [](const InstanceNormSelectorData & data) { return data.dt == DataType::F32; },
        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_instancenorm)
    },
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    {
        "fp16_neon_instancenorm",
        [](const InstanceNormSelectorData & data) { return data.dt == DataType::F16; },
        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_instancenorm)
    },
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
};
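
/* Editorial note (not part of the upstream file): each InstanceNormKernel entry pairs a
 * selector predicate with a micro-kernel registered through the Registrars.h macros, so
 * support for another data type would be added by appending one more entry. A hypothetical
 * sketch, assuming a matching micro-kernel and registrar macro existed:
 *
 * @code
 * {
 *     "bf16_neon_instancenorm",                                                          // illustrative name only
 *     [](const InstanceNormSelectorData & data) { return data.dt == DataType::BFLOAT16; },
 *     REGISTER_BF16_NEON(arm_compute::cpu::neon_bf16_instancenorm)                       // hypothetical registrar and function
 * },
 * @endcode
 */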

/** Micro-kernel selector
 *
 * @param[in] data Selection data passed to help pick the appropriate micro-kernel
 *
 * @return A matching micro-kernel else nullptr
 */
const InstanceNormKernel *get_implementation(const InstanceNormSelectorData &data)
{
    for(const auto &uk : available_kernels)
    {
        if(uk.is_selected(data))
        {
            return &uk;
        }
    }
    return nullptr;
}
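
/* Illustrative only: this is how run() below queries the selector, here spelled out for an
 * F32 input. With the FP32 entry compiled in, the returned entry's ukernel points at
 * arm_compute::cpu::neon_fp32_instancenorm.
 *
 * @code
 * const auto *uk = get_implementation(InstanceNormSelectorData{ DataType::F32 });
 * @endcode
 */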

Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_UNUSED(gamma);
    ARM_COMPUTE_UNUSED(beta);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must be different than 0");

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC, "NHWC data layout is not supported by the kernel directly");

    if(output != nullptr && output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
    }
    return Status{};
}

std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
    // We handle the planes manually
    Window win = calculate_max_window(*input, Steps(1));

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());

    // NEInstanceNormalizationLayerKernel doesn't need padding so update_window_and_padding() can be skipped
    return std::make_pair(Status{}, win);
}
} // namespace

NEInstanceNormalizationLayerKernel::NEInstanceNormalizationLayerKernel()
    : _input(nullptr), _output(nullptr), _gamma(1), _beta(0), _epsilon(1e-12)
{
}

void NEInstanceNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _input               = input;
    _output              = output == nullptr ? input : output;
    _gamma               = info.gamma;
    _beta                = info.beta;
    _epsilon             = info.epsilon;
    _use_mixed_precision = info.use_mixed_precision;

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), _gamma, _beta, _epsilon));

    // Configure kernel window
    auto win_config = validate_and_configure_window(_input->info(), _output->info());
    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

    INEKernel::configure(std::get<1>(win_config));
}
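
/* A minimal usage sketch (illustrative, not part of the upstream file): the kernel is
 * normally owned and driven by the NEInstanceNormalizationLayer runtime function, which
 * configures it once and schedules it on every run. The tensor names are placeholders,
 * the argument order of the InstanceNormalizationLayerKernelInfo constructor and the
 * split dimension passed to the scheduler are assumptions based on how other NE kernels
 * are typically driven.
 *
 * @code
 * NEInstanceNormalizationLayerKernel kernel;
 * kernel.configure(&src, &dst, InstanceNormalizationLayerKernelInfo(1.f, 0.f, 1e-12f, true)); // gamma, beta, epsilon, use_mixed_precision
 * NEScheduler::get().schedule(&kernel, Window::DimZ);
 * @endcode
 */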

Status NEInstanceNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info.gamma, info.beta, info.epsilon));
    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
    return Status{};
}

void NEInstanceNormalizationLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    const auto *uk = get_implementation(InstanceNormSelectorData{ _input->info()->data_type() });
    ARM_COMPUTE_ERROR_ON(uk == nullptr || uk->ukernel == nullptr);

    uk->ukernel(_input, _output, _gamma, _beta, _epsilon, _use_mixed_precision, window);
}
} // namespace arm_compute