/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensorPack.h"
#include "arm_compute/core/Window.h"

#include <cmath>
#include <cstddef>

#include "src/core/NEON/SVEMath.h"
#include <arm_sve.h>

namespace arm_compute
{
namespace cpu
{
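// Applies the activation selected in act_info element-wise to the FP16 tensor src and
// writes the result to dst over the given execution window. The parameters act_info.a()
// and act_info.b() are only read by the activations that use them (LINEAR, TANH,
// LEAKY_RELU, ELU, SWISH and the bounded ReLU variants).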
void sve_fp16_activation(const ITensor *src, ITensor *dst, const ActivationLayerInfo &act_info, const Window &window)
{
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());
    const ActivationLayerInfo::ActivationFunction act = act_info.activation();

    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input(src, win_collapsed);
    Iterator output(dst, win_collapsed);

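    // Broadcast the scalar constants and the activation parameters a/b to SVE vectors once,
    // outside the loop; 0.166666667f approximates 1/6 and is only used by HARD_SWISH.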
    const auto const_1     = svdup_n_f16(1.f);
    const auto const_0     = svdup_n_f16(0.f);
    const auto const_6     = svdup_n_f16(6.f);
    const auto const_3     = svdup_n_f16(3.f);
    const auto const_inv_6 = svdup_n_f16(0.166666667f);

    const auto va = svdup_n_f16(act_info.a());
    const auto vb = svdup_n_f16(act_info.b());
    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto input_ptr  = reinterpret_cast<const float16_t *>(input.ptr());
        const auto output_ptr = reinterpret_cast<float16_t *>(output.ptr());

        svfloat16_t tmp;

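        // Predicated loop: pg governs both the loads and the stores; svwhilelt_b16 builds the
        // predicate from the current index, svcnth() advances by one vector of 16-bit lanes, and
        // the loop runs until no lane is active, so the tail needs no scalar epilogue.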
        int      x  = window_start_x;
        svbool_t pg = svwhilelt_b16(x, window_end_x);
        do
        {
            const auto vin = svld1_f16(pg, input_ptr + x);
            switch(act)
            {
                case ActivationLayerInfo::ActivationFunction::ABS:
                    tmp = svabs_f16_z(pg, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LINEAR:
                    tmp = svmla_f16_z(pg, vb, va, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::LOGISTIC:
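                    // Sigmoid: 1 / (1 + exp(-x))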
                    tmp = svinv_f16_z(pg, svadd_f16_z(pg, const_1, svexp_f16_z(pg, svneg_f16_z(pg, vin))));
                    break;
                case ActivationLayerInfo::ActivationFunction::RELU:
                    tmp = svmax_f16_z(pg, const_0, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::BOUNDED_RELU:
                    tmp = svmin_f16_z(pg, va, svmax_f16_z(pg, const_0, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU:
                    tmp = svmin_f16_z(pg, va, svmax_f16_z(pg, vb, vin));
                    break;
                case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
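                    // a * x for x < 0, x otherwise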
                    tmp = svadd_f16_z(pg, svmul_f16_z(pg, svmin_f16_z(pg, vin, const_0), va), svmax_f16_z(pg, vin, const_0));
                    break;
                case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
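                    // log(1 + exp(x))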
                    tmp = svlog_f16_z(pg, svadd_f16_z(pg, const_1, svexp_f16_z(pg, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::ELU:
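                    // x for x > 0, a * (exp(x) - 1) otherwise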
                    tmp = svsel_f16(svcmpgt_f16(pg, vin, const_0), vin, svmul_f16_z(pg, va, svsub_f16_z(pg, svexp_f16_z(pg, vin), const_1)));
                    break;
                case ActivationLayerInfo::ActivationFunction::SQRT:
                    tmp = svsqrt_f16_z(pg, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::SQUARE:
                    tmp = svmul_f16_z(pg, vin, vin);
                    break;
                case ActivationLayerInfo::ActivationFunction::TANH:
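                    // a * tanh(b * x)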
                    tmp = svmul_f16_z(pg, va, svtanh_f16_z(pg, svmul_f16_z(pg, vb, vin)));
                    break;
                case ActivationLayerInfo::ActivationFunction::IDENTITY:
                    tmp = vin;
                    break;
                case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
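                    // x * min(6, max(0, x + 3)) / 6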
                    tmp = svmul_f16_z(pg, vin, svmul_f16_z(pg, const_inv_6, svmin_f16_z(pg, const_6, svmax_f16_z(pg, const_0, svadd_f16_z(pg, vin, const_3)))));
                    break;
                case ActivationLayerInfo::ActivationFunction::SWISH:
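                    // x * sigmoid(a * x)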
                    tmp = svmul_f16_z(pg, vin, svinv_f16_z(pg, svadd_f16_z(pg, const_1, svexp_f16_z(pg, svneg_f16_z(pg, svmul_f16_z(pg, va, vin))))));
                    break;
                default:
                    ARM_COMPUTE_ERROR("Unsupported activation function");
            }
            svst1_f16(pg, output_ptr + x, tmp);

            x += svcnth();
            pg = svwhilelt_b16(x, window_end_x);
        }
        while(svptest_any(svptrue_b16(), pg));
    },
    input, output);
}
} // namespace cpu
} // namespace arm_compute
#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
136