/*
 * Copyright (c) 2018-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "src/cpu/kernels/fuse_batch_normalization/generic/impl.h"

namespace arm_compute
{
namespace cpu
{
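/** Fuse batch normalization parameters into the weights and bias of a depthwise convolution (NCHW layout).
 *
 * For every channel the kernel computes:
 *   fused_weight = weight * gamma / sqrt(var + epsilon)
 *   fused_bias   = (bias - mean) * gamma / sqrt(var + epsilon) + beta
 * where gamma defaults to 1, and beta and the convolution bias default to 0 when the
 * corresponding tensors are not passed. The fusion runs in place when the fused output
 * tensors are nullptr or alias the inputs.
 */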
template <typename T>
void fused_batch_normalization_dwc_nchw(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias,
                                        const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window)
{
    using ScalarType   = T;
    const int size     = 16 / dwc_weights->info()->element_size();
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

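    // Run in place when no separate output tensor is provided or the output aliases the input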
    const bool run_in_place_weights = (fused_weights == nullptr) || (fused_weights == dwc_weights);
    const bool run_in_place_bias    = (fused_bias == nullptr) || (dwc_bias != nullptr && fused_bias == dwc_bias);

    // Collapse the X dimension of the execution window: each iteration processes a full row manually below
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x  = size;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Iterator dwc_w_in(dwc_weights, win);
    Iterator dwc_w_out(run_in_place_weights ? dwc_weights : fused_weights, win);

    const auto dwc_bias_in  = (dwc_bias != nullptr ? reinterpret_cast<ScalarType *>(dwc_bias->ptr_to_element(Coordinates(0, 0))) : nullptr);
    auto       dwc_bias_out = (run_in_place_bias ? dwc_bias_in : reinterpret_cast<ScalarType *>(fused_bias->ptr_to_element(Coordinates(0, 0))));

    const auto input_mean  = reinterpret_cast<const ScalarType *>(bn_mean->ptr_to_element(Coordinates(0, 0)));
    const auto input_var   = reinterpret_cast<const ScalarType *>(bn_var->ptr_to_element(Coordinates(0, 0)));
    const auto input_gamma = (bn_gamma != nullptr) ? reinterpret_cast<const ScalarType *>(bn_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
    const auto input_beta  = (bn_beta != nullptr) ? reinterpret_cast<const ScalarType *>(bn_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;

    auto       mean_vec    = wrapper::vdup_n(ScalarType(0), ExactTagType{});
    auto       var_vec     = wrapper::vdup_n(ScalarType(0), ExactTagType{});
    auto       gamma_vec   = wrapper::vdup_n(ScalarType(1), ExactTagType{});
    auto       beta_vec    = wrapper::vdup_n(ScalarType(0), ExactTagType{});
    auto       rvar_vec    = wrapper::vdup_n(ScalarType(0), ExactTagType{});
    const auto epsilon_vec = wrapper::vdup_n(ScalarType(epsilon), ExactTagType{});

    auto mean               = ScalarType(0.0);
    auto var                = ScalarType(0.0);
    auto gamma              = ScalarType(1.0);
    auto beta               = ScalarType(0.0);
    auto dwc_bias_in_scalar = ScalarType(0.0);
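    // Iterate over the collapsed window: each iteration processes one row (id[1]) of one channel,
    // with id[2] indexing the per-channel batch normalization parameters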
    execute_window_loop(win, [&](const Coordinates & id)
    {
        var = input_var[id[2]];
        if(input_gamma != nullptr)
        {
            gamma = input_gamma[id[2]];
        }

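        // Fold the batch normalization offset into the bias only once per channel (first row):
        // fused_bias = (bias - mean) * gamma / sqrt(var + epsilon) + beta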
        if(id[1] == 0)
        {
            mean = input_mean[id[2]];

            // Construct vectors
            mean_vec = wrapper::vdup_n(mean, ExactTagType{});
            if(input_beta != nullptr)
            {
                beta     = input_beta[id[2]];
                beta_vec = wrapper::vdup_n(beta, ExactTagType{});
            }

            if(dwc_bias_in != nullptr)
            {
                dwc_bias_in_scalar = dwc_bias_in[id[2]];
            }

            auto dwc_bias_tmp_scalar = (dwc_bias_in_scalar - mean) / std::sqrt(var + ScalarType(epsilon));
            dwc_bias_out[id[2]]      = (dwc_bias_tmp_scalar * gamma) + beta;
        }

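        // Scale the current row of weights by gamma / sqrt(var + epsilon):
        // a vectorized main loop followed by a scalar loop for the left-over elements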
        int  x             = window_start_x;
        auto dwc_w_in_ptr  = reinterpret_cast<const ScalarType *>(dwc_w_in.ptr());
        auto dwc_w_out_ptr = reinterpret_cast<ScalarType *>(dwc_w_out.ptr());
        var_vec            = wrapper::vdup_n(var, ExactTagType{});
        gamma_vec          = wrapper::vdup_n(gamma, ExactTagType{});
        rvar_vec           = wrapper::vinvsqrt(wrapper::vadd(var_vec, epsilon_vec));

        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            auto wn = wrapper::vloadq(dwc_w_in_ptr + x);
            wn      = wrapper::vmul(wn, rvar_vec);
            wn      = wrapper::vmul(wn, gamma_vec);

            // Store results
            wrapper::vstore(dwc_w_out_ptr + x, wn);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            *(dwc_w_out_ptr + x) = *(dwc_w_in_ptr + x) / std::sqrt(var + ScalarType(epsilon)) * gamma;
        }
    },
    dwc_w_in, dwc_w_out);
}

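// Type-specific entry points: FP32 is always available, while the FP16 variant is only
// compiled when the target supports FP16 vector arithmetic and FP16 kernels are enabled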
void fused_batch_normalization_dwc_nchw_f32(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias,
                                            const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window)
{
    return fused_batch_normalization_dwc_nchw<float32_t>(dwc_weights, dwc_bias, fused_weights, fused_bias,
                                                         bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window);
}

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
void fused_batch_normalization_dwc_nchw_f16(const ITensor *dwc_weights, const ITensor *dwc_bias, ITensor *fused_weights, ITensor *fused_bias,
                                            const ITensor *bn_mean, const ITensor *bn_var, const ITensor *bn_beta, const ITensor *bn_gamma, float epsilon, const Window &window)
{
    return fused_batch_normalization_dwc_nchw<float16_t>(dwc_weights, dwc_bias, fused_weights, fused_bias,
                                                         bn_mean, bn_var, bn_beta, bn_gamma, epsilon, window);
}
#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */

} // namespace cpu
} // namespace arm_compute