/*
 * Single-precision vector tanh(x) function.
 *
 * Copyright (c) 2022-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

#include "v_expm1f_inline.h"

static const struct data
{
  struct v_expm1f_data expm1f_consts;
  uint32x4_t boring_bound, large_bound, onef;
} data = {
  .expm1f_consts = V_EXPM1F_DATA,
  /* 0x1.205966p+3, above which tanhf rounds to 1 (or -1 for negative x).  */
  .boring_bound = V4 (0x41102cb3),
  .large_bound = V4 (0x7f800000),
  .onef = V4 (0x3f800000),
};

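/* Fall back to the scalar routine for lanes flagged as special.  */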
static float32x4_t NOINLINE VPCS_ATTR
special_case (float32x4_t x, float32x4_t y, uint32x4_t special)
{
  return v_call_f32 (tanhf, x, y, special);
}

/* Approximation for single-precision vector tanh(x), using a simplified
   version of expm1f. The maximum error is 2.58 ULP:
   _ZGVnN4v_tanhf (0x1.fa5eep-5) got 0x1.f9ba02p-5
				want 0x1.f9ba08p-5.  */
float32x4_t VPCS_ATTR V_NAME_F1 (tanh) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

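  /* sign holds the sign bit of x (ix XOR |x| bits); OR-ing it into 1.0f gives
     copysign (1.0f, x), the result returned once |x| exceeds boring_bound.  */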
  uint32x4_t ix = vreinterpretq_u32_f32 (x);
  float32x4_t ax = vabsq_f32 (x);
  uint32x4_t iax = vreinterpretq_u32_f32 (ax);
  uint32x4_t sign = veorq_u32 (ix, iax);
  uint32x4_t is_boring = vcgtq_u32 (iax, d->boring_bound);
  float32x4_t boring = vreinterpretq_f32_u32 (vorrq_u32 (sign, d->onef));

#if WANT_SIMD_EXCEPT
  /* If fp exceptions are to be triggered properly, set all special and boring
     lanes to 0, which will trigger no exceptions, and fix them up later.  */
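  /* 0x34000000 is 0x1p-23; lanes this small are also routed to the scalar
     fallback.  */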
  uint32x4_t special = vorrq_u32 (vcgtq_u32 (iax, d->large_bound),
				  vcltq_u32 (iax, v_u32 (0x34000000)));
  x = v_zerofy_f32 (x, is_boring);
  if (unlikely (v_any_u32 (special)))
    x = v_zerofy_f32 (x, special);
#else
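  /* Without WANT_SIMD_EXCEPT, only NaN lanes need the scalar fallback.  */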
  uint32x4_t special = vcgtq_u32 (iax, d->large_bound);
#endif

  /* tanh(x) = (e^2x - 1) / (e^2x + 1).  */
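  /* Equivalently, with q = expm1 (2x), tanh(x) = q / (q + 2).  */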
  float32x4_t q = expm1f_inline (vmulq_n_f32 (x, 2), &d->expm1f_consts);
  float32x4_t y = vdivq_f32 (q, vaddq_f32 (q, v_f32 (2.0)));
  if (unlikely (v_any_u32 (special)))
    return special_case (vreinterpretq_f32_u32 (ix),
			 vbslq_f32 (is_boring, boring, y), special);
  return vbslq_f32 (is_boring, boring, y);
}

PL_SIG (V, F, 1, tanh, -10.0, 10.0)
PL_TEST_ULP (V_NAME_F1 (tanh), 2.09)
PL_TEST_EXPECT_FENV (V_NAME_F1 (tanh), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0, 0x1p-23, 1000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0x1p-23, 0x1.205966p+3, 100000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (tanh), 0x1.205966p+3, inf, 100)