/*
 * Double-precision vector sin function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float64x2_t poly[7];
  float64x2_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
} data = {
  .poly = { V2 (-0x1.555555555547bp-3), V2 (0x1.1111111108a4dp-7),
            V2 (-0x1.a01a019936f27p-13), V2 (0x1.71de37a97d93ep-19),
            V2 (-0x1.ae633919987c6p-26), V2 (0x1.60e277ae07cecp-33),
            V2 (-0x1.9e9540300a1p-41) },

  .range_val = V2 (0x1p23),
  .inv_pi = V2 (0x1.45f306dc9c883p-2),
  .pi_1 = V2 (0x1.921fb54442d18p+1),
  .pi_2 = V2 (0x1.1a62633145c06p-53),
  .pi_3 = V2 (0x1.c1cd129024e09p-106),
  .shift = V2 (0x1.8p52),
};
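
/* poly holds the coefficients of an odd polynomial approximation to sin on
   the reduced interval (sin(r) ~ r + r^3 * P(r^2), see below).  pi_1, pi_2
   and pi_3 split pi into high, medium and low parts so that n*pi can be
   subtracted with extra precision, and shift (0x1.8p52) is the rounding
   constant used to compute rint(x/pi) without a rounding instruction.  */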

#if WANT_SIMD_EXCEPT
# define TinyBound v_u64 (0x3000000000000000) /* asuint64 (0x1p-255). */
# define Thresh v_u64 (0x1160000000000000)    /* RangeVal - TinyBound. */
#endif

#define C(i) d->poly[i]

static float64x2_t VPCS_ATTR NOINLINE
special_case (float64x2_t x, float64x2_t y, uint64x2_t odd, uint64x2_t cmp)
{
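  /* odd holds the sign (-1)^n in bit 63 of each lane; the XOR applies it to
     y before the lanes flagged in cmp are recomputed with scalar sin.  */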
  y = vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
  return v_call_f64 (sin, x, y, cmp);
}

/* Vector (AdvSIMD) sin approximation.
   Maximum observed error in [-pi/2, pi/2], where argument is not reduced,
   is 2.87 ULP:
   _ZGVnN2v_sin (0x1.921d5c6a07142p+0) got 0x1.fffffffa7dc02p-1
                                      want 0x1.fffffffa7dc05p-1
   Maximum observed error in the entire non-special domain ([-2^23, 2^23])
   is 3.22 ULP:
   _ZGVnN2v_sin (0x1.5702447b6f17bp+22) got 0x1.ffdcd125c84fbp-3
                                       want 0x1.ffdcd125c84f8p-3. */
float64x2_t VPCS_ATTR V_NAME_D1 (sin) (float64x2_t x)
{
  const struct data *d = ptr_barrier (&data);
  float64x2_t n, r, r2, r3, r4, y, t1, t2, t3;
  uint64x2_t odd, cmp;

#if WANT_SIMD_EXCEPT
  /* Detect |x| <= TinyBound or |x| >= RangeVal. If fenv exceptions are to be
     triggered correctly, set any special lanes to 1 (which is neutral w.r.t.
     fenv). These lanes will be fixed by special-case handler later. */
  uint64x2_t ir = vreinterpretq_u64_f64 (vabsq_f64 (x));
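  /* A single biased unsigned compare catches both bounds: if |x| < TinyBound
     the subtraction wraps around to a huge value above Thresh, and if
     |x| >= RangeVal the difference stays at or above Thresh.  */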
  cmp = vcgeq_u64 (vsubq_u64 (ir, TinyBound), Thresh);
  r = vbslq_f64 (cmp, vreinterpretq_f64_u64 (cmp), x);
#else
  r = x;
  cmp = vcageq_f64 (x, d->range_val);
#endif

  /* n = rint(x/pi).  Adding the shift constant 0x1.8p52 forces rounding to
     nearest in the low mantissa bits; bit 0 of the sum is the parity of n,
     which is shifted up to bit 63 to form the sign mask odd, since
     sin (x - n*pi) = (-1)^n * sin (x).  Subtracting shift recovers n.  */
  n = vfmaq_f64 (d->shift, d->inv_pi, r);
  odd = vshlq_n_u64 (vreinterpretq_u64_f64 (n), 63);
  n = vsubq_f64 (n, d->shift);

  /* r = x - n*pi (range reduction into -pi/2 .. pi/2).  pi is split into
     three parts so the fused multiply-subtracts cancel the leading bits of
     n*pi exactly, keeping r accurate even when n is large.  */
  r = vfmsq_f64 (r, d->pi_1, n);
  r = vfmsq_f64 (r, d->pi_2, n);
  r = vfmsq_f64 (r, d->pi_3, n);

  /* sin(r) poly approx: sin(r) ~ r + r^3 * P(r^2).  P is evaluated
     pairwise: each t holds two coefficients combined in r2, and the pairs
     are folded together Horner-style in r4.  */
  r2 = vmulq_f64 (r, r);
  r3 = vmulq_f64 (r2, r);
  r4 = vmulq_f64 (r2, r2);

  t1 = vfmaq_f64 (C (4), C (5), r2);
  t2 = vfmaq_f64 (C (2), C (3), r2);
  t3 = vfmaq_f64 (C (0), C (1), r2);

  y = vfmaq_f64 (t1, C (6), r4);
  y = vfmaq_f64 (t2, y, r4);
  y = vfmaq_f64 (t3, y, r4);
  y = vfmaq_f64 (r, y, r3);

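  /* Apply the (-1)^n sign by XOR-ing the sign bit with odd.  Lanes flagged
     in cmp (tiny inputs under WANT_SIMD_EXCEPT, or |x| >= RangeVal) are
     recomputed with scalar sin in special_case.  */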
  if (unlikely (v_any_u64 (cmp)))
    return special_case (x, y, odd, cmp);
  return vreinterpretq_f64_u64 (veorq_u64 (vreinterpretq_u64_f64 (y), odd));
}