/*
 * Double-precision vector cosh(x) function.
 *
 * Copyright (c) 2022-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64x2_t poly[3];
  float64x2_t inv_ln2;
  double ln2[2];
  float64x2_t shift, thres;
  uint64x2_t index_mask, special_bound;
} data = {
  .poly = { V2 (0x1.fffffffffffd4p-2), V2 (0x1.5555571d6b68cp-3),
	    V2 (0x1.5555576a59599p-5), },

  .inv_ln2 = V2 (0x1.71547652b82fep8), /* N/ln2.  */
  /* -ln2/N.  */
  .ln2 = {-0x1.62e42fefa39efp-9, -0x1.abc9e3b39803f3p-64},
  .shift = V2 (0x1.8p+52),
  .thres = V2 (704.0),

  .index_mask = V2 (0xff),
  /* 0x1.6p9, above which exp overflows.  */
  .special_bound = V2 (0x4086000000000000),
};
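
/* Note (assuming V_EXP_TAIL_TABLE_BITS is 8, as the 0xff index_mask
   implies): the reduction constants are scaled for a table of size
   N = 2^8 = 256, so inv_ln2 is 256/ln2 (hence the p8 exponent) and ln2
   holds -ln2/256 split into high and low parts.  special_bound is the bit
   pattern of the double 0x1.6p9 = 704.0.  */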

/* Fall-back helper: v_call_f64 recomputes with the scalar cosh the lanes
   flagged in special, leaving the remaining lanes of y as they are.  */
static float64x2_t NOINLINE VPCS_ATTR
special_case (float64x2_t x, float64x2_t y, uint64x2_t special)
{
  return v_call_f64 (cosh, x, y, special);
}

/* Helper for approximating exp(x). Copied from v_exp_tail, with no
   special-case handling or tail.  */
static inline float64x2_t
exp_inline (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* n = round(x/(ln2/N)).  */
  float64x2_t z = vfmaq_f64 (d->shift, x, d->inv_ln2);
  uint64x2_t u = vreinterpretq_u64_f64 (z);
  float64x2_t n = vsubq_f64 (z, d->shift);
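  /* Adding the shift constant 0x1.8p52 forces the rounding of x*N/ln2 to
     happen in the low bits of the double, so the low bits of u now hold
     round(x*N/ln2) as an integer, and subtracting the shift recovers n as
     a double.  */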

  /* r = x - n*ln2/N.  */
  float64x2_t ln2 = vld1q_f64 (d->ln2);
  float64x2_t r = vfmaq_laneq_f64 (x, n, ln2, 0);
  r = vfmaq_laneq_f64 (r, n, ln2, 1);
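  /* Cody-Waite style reduction: ln2/N is split into two doubles and
     subtracted in two fused multiply-adds, so the representation error of
     ln2/N does not pollute r.  */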

  uint64x2_t e = vshlq_n_u64 (u, 52 - V_EXP_TAIL_TABLE_BITS);
  uint64x2_t i = vandq_u64 (u, d->index_mask);
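  /* Writing n = 2^8 * m + i, we have 2^(n/N) = 2^m * 2^(i/N): the low 8
     bits of u index the table, while the bits above them are shifted up
     into the exponent field to provide the 2^m scaling.  */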

  /* y = tail + exp(r) - 1 ~= r + C1 r^2 + C2 r^3 + C3 r^4.  */
  float64x2_t y = vfmaq_f64 (d->poly[1], d->poly[2], r);
  y = vfmaq_f64 (d->poly[0], y, r);
  y = vmulq_f64 (vfmaq_f64 (v_f64 (1), y, r), r);

  /* s = 2^(n/N).  */
  u = v_lookup_u64 (__v_exp_tail_data, i);
  float64x2_t s = vreinterpretq_f64_u64 (vaddq_u64 (u, e));
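  /* The table entry is the bit pattern of 2^(i/N); adding e in the integer
     domain increments its biased exponent by m, giving s = 2^m * 2^(i/N).
     Since y ~= exp(r) - 1, the result below is s * (1 + y) ~= s * exp(r)
     = exp(x).  */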

  return vfmaq_f64 (s, y, s);
}

/* Approximation for vector double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error is in the scalar fall-back region, so it
   matches the scalar routine's error of 1.93 ULP:
   _ZGVnN2v_cosh (0x1.628af341989dap+9) got 0x1.fdf28623ef921p+1021
				       want 0x1.fdf28623ef923p+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   _ZGVnN2v_cosh (0x1.8e205b6ecacf7p+2) got 0x1.f711dcb0c77afp+7
				       want 0x1.f711dcb0c77b1p+7.  */
float64x2_t VPCS_ATTR V_NAME_D1 (cosh) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);

  float64x2_t ax = vabsq_f64 (x);
  uint64x2_t special
      = vcgtq_u64 (vreinterpretq_u64_f64 (ax), d->special_bound);
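  /* ax is non-negative, so comparing bit patterns as unsigned integers
     orders them the same way as comparing the doubles; the comparison also
     flags NaN and infinity, whose bit patterns lie above special_bound.  */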

  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)).  */
  float64x2_t t = exp_inline (ax);
  float64x2_t half_t = vmulq_n_f64 (t, 0.5);
  float64x2_t half_over_t = vdivq_f64 (v_f64 (0.5), t);
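  /* One exp_inline call serves both terms: exp(-|x|) is simply 1/exp(|x|),
     and the symmetry cosh(-x) = cosh(x) means working with |x| loses
     nothing.  */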

  /* Fall back to scalar for any special cases.  */
  if (unlikely (v_any_u64 (special)))
    return special_case (x, vaddq_f64 (half_t, half_over_t), special);

  return vaddq_f64 (half_t, half_over_t);
}
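
/* Example usage (hypothetical, not part of the library): under the AArch64
   vector PCS, V_NAME_D1 (cosh) expands to _ZGVnN2v_cosh (the name seen in
   the error report above), so a caller could write

     float64x2_t x = (float64x2_t){ 1.0, -2.5 };
     float64x2_t y = _ZGVnN2v_cosh (x);

   or let the compiler vectorise scalar calls to cosh against this
   routine.  */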

PL_SIG (V, D, 1, cosh, -10.0, 10.0)
PL_TEST_ULP (V_NAME_D1 (cosh), 1.43)
PL_TEST_EXPECT_FENV (V_NAME_D1 (cosh), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (cosh), 0, 0x1.6p9, 100000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (cosh), 0x1.6p9, inf, 1000)