/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/wrapper/wrapper.h"
namespace arm_compute
{
namespace cpu
{
/** Constant parameters needed by the activation implementation.
 *  These parameters differ for each floating-point type.
 *
 * @note These are passed as a struct because C++ does not allow float as a template parameter until C++20
 **/
struct ActFpImplParams
{
    float delta;  /**< Minimum delta needed to avoid NaN on corner-cases of elementary functions */
    int   step_x; /**< Window step at the x dimension */
};

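// A minimal usage sketch, for illustration only: the value 1e-24, the step of
// 4, and the name Fp32Params are assumptions, not taken from this file.
//
//   constexpr ActFpImplParams Fp32Params = { static_cast<float>(1e-24), 4 };
//   ...
//   fp_neon_activation_impl<float, Fp32Params>(src, dst, act_info, window);
//
// Note that the parameter object needs static storage duration, because P is
// bound as a reference non-type template parameter.
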
#ifndef __aarch64__
inline float32x4_t mask_float_vector(const float32x4_t &in, const uint32x4_t &mask)
{
    auto int_in = vreinterpretq_u32_f32(in);
    return vreinterpretq_f32_u32(wrapper::vand(int_in, mask));
}
#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
inline float16x8_t mask_float_vector(const float16x8_t &in, const uint16x8_t &mask)
{
    auto int_in = vreinterpretq_u16_f16(in);
    return vreinterpretq_f16_u16(wrapper::vand(int_in, mask));
}
#endif //defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
#endif /* __aarch64__ */
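
// Per-lane scalar sketch of how the SQRT case below uses mask_float_vector on
// non-aarch64 builds (pseudocode, not the library's code):
//
//   T biased = in + ((in == 0) ? delta : 0); // keep vinvsqrt away from 1/sqrt(0) = inf
//   T out    = (in == 0) ? 0 : T(1) / rsqrt(biased); // force an exact 0 for a 0 input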

template <typename T, const ActFpImplParams &P>
void fp_neon_activation_impl(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    /** SIMD vector tag type. */
    using ExactTagType                                           = typename arm_compute::wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
    constexpr int                                 window_step_x  = P.step_x;
    const auto                                    window_start_x = static_cast<int>(window.x().start());
    const auto                                    window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act            = act_info.activation();
    Window                                        win_collapsed  = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));
    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);
    // In case of non-aarch64, a small delta value is added to the input
    // to prevent NaN values caused by zeros in inputs to SQRT.
    // In case of aarch64, we call vsqrt directly, so we don't use delta.
#ifndef __aarch64__
    const auto delta = wrapper::vdup_n(static_cast<T>(P.delta), ExactTagType{});
#else  /* #ifndef __aarch64__ */
    const auto const_inv_2      = wrapper::vdup_n(static_cast<T>(0.5f), ExactTagType{});
    const auto const_inv_sqrt_2 = wrapper::vdup_n(static_cast<T>(0.70710678118f), ExactTagType{});
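    // 0.5 and 1/sqrt(2): used only by the aarch64-specific GELU case below.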
#endif /* __aarch64__ */
    const auto      const_1           = wrapper::vdup_n(static_cast<T>(1.f), ExactTagType{});
    const auto      const_0           = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
    const auto      const_6           = wrapper::vdup_n(static_cast<T>(6.f), ExactTagType{});
    const auto      const_3           = wrapper::vdup_n(static_cast<T>(3.f), ExactTagType{});
    const auto      const_inv_6       = wrapper::vdup_n(static_cast<T>(0.166666667f), ExactTagType{});
    constexpr float soft_relu_thresh  = 12.f;
    const auto      vsoft_relu_thresh = wrapper::vdup_n(static_cast<T>(soft_relu_thresh), ExactTagType{});
    const auto      va                = wrapper::vdup_n(static_cast<T>(act_info.a()), ExactTagType{});
    const auto      vb                = wrapper::vdup_n(static_cast<T>(act_info.b()), ExactTagType{});
    const auto      a                 = static_cast<T>(act_info.a());
    const auto      b                 = static_cast<T>(act_info.b());
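    // Process the collapsed window in two phases: a vectorized main loop that
    // handles window_step_x elements per iteration, then a scalar tail loop.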
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
        const auto output_ptr = reinterpret_cast<T *>(output.ptr());
        wrapper::traits::neon_bitvector_t<T, wrapper::traits::BitWidth::W128> tmp;
        // Compute window_step_x elements per iteration
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vin = wrapper::vloadq(input_ptr + x);
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = wrapper::vabs(vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = wrapper::vmla(vb, va, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = wrapper::vmax(const_0, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(const_0, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = wrapper::vmin(va, wrapper::vmax(vb, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, const_0), vin, wrapper::vmul(va, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = wrapper::vbsl(wrapper::vcgt(vin, vsoft_relu_thresh), vin, wrapper::vlog(wrapper::vadd(const_1, wrapper::vexpq(vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = wrapper::vbsl(wrapper::vcge(vin, const_0), vin, wrapper::vmul(va, wrapper::vsub(wrapper::vexpq(vin), const_1)));
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
#ifdef __aarch64__
                    tmp = wrapper::vsqrt(vin);
#else  /* __aarch64__ */
                    {
                        const auto bitmask = wrapper::vceq(vin, wrapper::vdup_n(0.f, ExactTagType{}));
                        tmp                = wrapper::vinv(wrapper::vinvsqrt(wrapper::vadd(vin, mask_float_vector(delta, bitmask))));
                        tmp                = mask_float_vector(tmp, wrapper::vnot(bitmask));
                    }
#endif /* __aarch64__ */
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = wrapper::vmul(vin, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = wrapper::vmul(va, wrapper::vtanh(wrapper::vmul(vb, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = vin;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_6, wrapper::vmin(const_6, wrapper::vmax(const_0, wrapper::vadd(vin, const_3)))));
                    break;
                case ActivationLayerInfo::ActivationFunction::SWISH:
                    tmp = wrapper::vmul(vin, wrapper::vinv(wrapper::vadd(const_1, wrapper::vexpq(wrapper::vneg(wrapper::vmul(va, vin))))));
                    break;
#ifdef __aarch64__
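                // GELU(x) = x * Phi(x) = x * 0.5 * (1 + erf(x / sqrt(2)))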
                case ActivationLayerInfo::ActivationFunction::GELU:
                    tmp = wrapper::vmul(vin, wrapper::vmul(const_inv_2, wrapper::vadd(const_1, wrapper::verf(wrapper::vmul(vin, const_inv_sqrt_2)))));
                    break;
#endif /* __aarch64__ */
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            wrapper::vstore(output_ptr + x, tmp);
        }
        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            const T in = *(reinterpret_cast<const T *>(input_ptr + x));
            T       tmp;
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = std::abs(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = a * in + b;
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
                    tmp = static_cast<T>(1) / (static_cast<T>(1) + std::exp(-in));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = std::max<T>(static_cast<T>(0), in);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = std::min<T>(a, std::max(static_cast<T>(0), in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = std::min<T>(a, std::max<T>(b, in));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
                    tmp = (in > 0) ? in : a * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
                    tmp = (in > soft_relu_thresh) ? in : std::log(static_cast<T>(1) + std::exp(in));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
                    tmp = (in >= 0) ? in : a * (std::exp(in) - 1);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    tmp = std::sqrt(in);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = in * in;
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
                    tmp = a * std::tanh(b * in);
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = in;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
                    tmp = in * ((std::min(std::max((in + 3), 0.0f), 6.0f)) * 0.166666667f);
                    break;
                case ActivationLayerInfo::ActivationFunction::SWISH:
                    tmp = in / (static_cast<T>(1) + std::exp(-a * in));
                    break;
                case ActivationLayerInfo::ActivationFunction::GELU:
                    tmp = in * static_cast<T>(0.5f * (1.0f + erff(static_cast<float>(in) / 1.41421356237f)));
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            *(output_ptr + x) = tmp;
        }
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute