/*
 * Double-precision SVE cosh(x) function.
 *
 * Copyright (c) 2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "sv_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64_t poly[3];
  float64_t inv_ln2, ln2_hi, ln2_lo, shift, thres;
  uint64_t index_mask, special_bound;
} data = {
  .poly = { 0x1.fffffffffffd4p-2, 0x1.5555571d6b68cp-3,
            0x1.5555576a59599p-5, },

  .inv_ln2 = 0x1.71547652b82fep8, /* N/ln2. */
  /* -ln2/N. */
  .ln2_hi = -0x1.62e42fefa39efp-9,
  .ln2_lo = -0x1.abc9e3b39803f3p-64,
  .shift = 0x1.8p+52,
  .thres = 704.0,

  .index_mask = 0xff,
  /* 0x1.6p9, above which exp overflows. */
  .special_bound = 0x4086000000000000,
};
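
/* Sketch of the scheme these constants encode (assuming
   V_EXP_TAIL_TABLE_BITS == 8, consistent with index_mask = 0xff, i.e. a
   256-entry lookup table with N = 256): exp_inline below writes
   x = n * (ln2/N) + r and computes exp(x) = 2^(n/N) * exp(r), with 2^(n/N)
   looked up from __v_exp_tail_data. inv_ln2 is N/ln2, ln2_hi and ln2_lo
   together represent -ln2/N in extended precision, and shift (0x1.8p+52)
   implements rounding of x * N/ln2 to the nearest integer. */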

static svfloat64_t NOINLINE
special_case (svfloat64_t x, svfloat64_t y, svbool_t special)
{
  return sv_call_f64 (cosh, x, y, special);
}

/* Helper for approximating exp(x). Copied from sv_exp_tail, with no
   special-case handling or tail. */
static inline svfloat64_t
exp_inline (svfloat64_t x, const svbool_t pg, const struct data *d)
{
  /* Calculate exp(x). */
  svfloat64_t z = svmla_x (pg, sv_f64 (d->shift), x, d->inv_ln2);
  svfloat64_t n = svsub_x (pg, z, d->shift);
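  /* n = round(x * N/ln2): adding shift rounds the product to the nearest
     integer and leaves that integer in the low bits of z; subtracting shift
     recovers it as a double. */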

  svfloat64_t r = svmla_x (pg, x, n, d->ln2_hi);
  r = svmla_x (pg, r, n, d->ln2_lo);

  svuint64_t u = svreinterpret_u64 (z);
  svuint64_t e = svlsl_x (pg, u, 52 - V_EXP_TAIL_TABLE_BITS);
  svuint64_t i = svand_x (pg, u, d->index_mask);

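  /* Evaluate the polynomial: y = r + poly[0] r^2 + poly[1] r^3 + poly[2] r^4,
     which approximates expm1(r) for the small reduced argument r. */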
  svfloat64_t y = svmla_x (pg, sv_f64 (d->poly[1]), r, d->poly[2]);
  y = svmla_x (pg, sv_f64 (d->poly[0]), r, y);
  y = svmla_x (pg, sv_f64 (1.0), r, y);
  y = svmul_x (pg, r, y);

  /* s = 2^(n/N). */
  u = svld1_gather_index (pg, __v_exp_tail_data, i);
  svfloat64_t s = svreinterpret_f64 (svadd_x (pg, u, e));

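  /* exp(x) ~ s * (1 + y) = s + s * y. */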
  return svmla_x (pg, s, s, y);
}

/* Approximation for SVE double-precision cosh(x) using exp_inline.
   cosh(x) = (exp(x) + exp(-x)) / 2.
   The greatest observed error lies in the scalar fall-back region, so it is
   the same as that of the scalar routine, 1.93 ULP:
   _ZGVsMxv_cosh (0x1.628ad45039d2fp+9) got 0x1.fd774e958236dp+1021
                                       want 0x1.fd774e958236fp+1021.

   The greatest observed error in the non-special region is 1.54 ULP:
   _ZGVsMxv_cosh (0x1.ba5651dd4486bp+2) got 0x1.f5e2bb8d5c98fp+8
                                       want 0x1.f5e2bb8d5c991p+8. */
svfloat64_t SV_NAME_D1 (cosh) (svfloat64_t x, const svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  svfloat64_t ax = svabs_x (pg, x);
  svbool_t special = svcmpgt (pg, svreinterpret_u64 (ax), d->special_bound);

  /* Up to the point that exp overflows, we can use it to calculate cosh by
     exp(|x|) / 2 + 1 / (2 * exp(|x|)). */
  svfloat64_t t = exp_inline (ax, pg, d);
  svfloat64_t half_t = svmul_x (pg, t, 0.5);
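  /* svdivr divides with the operands reversed, so this computes
     0.5 / t = 1 / (2 * exp(|x|)). */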
  svfloat64_t half_over_t = svdivr_x (pg, t, 0.5);

  /* Fall back to scalar for any special cases. */
  if (unlikely (svptest_any (pg, special)))
    return special_case (x, svadd_x (pg, half_t, half_over_t), special);

  return svadd_x (pg, half_t, half_over_t);
}
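
/* Illustrative caller (a sketch, not part of the library): applies this
   routine across a buffer under a while-lt predicate. cosh_array is a
   hypothetical helper; it assumes <arm_sve.h> plus a declaration of
   _ZGVsMxv_cosh, the symbol SV_NAME_D1 (cosh) expands to per the error
   report above.

     #include <arm_sve.h>
     #include <stddef.h>

     svfloat64_t _ZGVsMxv_cosh (svfloat64_t, svbool_t);

     void
     cosh_array (double *dst, const double *src, size_t n)
     {
       size_t i = 0;
       svbool_t pg = svwhilelt_b64 (i, n);
       while (svptest_any (svptrue_b64 (), pg))
         {
           // Load up to one vector of inputs, compute cosh lane-wise, store.
           svfloat64_t vx = svld1 (pg, src + i);
           svst1 (pg, dst + i, _ZGVsMxv_cosh (vx, pg));
           i += svcntd ();
           pg = svwhilelt_b64 (i, n);
         }
     }  */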

PL_SIG (SV, D, 1, cosh, -10.0, 10.0)
PL_TEST_ULP (SV_NAME_D1 (cosh), 1.43)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0, 0x1.6p9, 100000)
PL_TEST_SYM_INTERVAL (SV_NAME_D1 (cosh), 0x1.6p9, inf, 1000)