/*
 * Single-precision vector sin function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
7*412f47f9SXin Li
8*412f47f9SXin Li #include "mathlib.h"
9*412f47f9SXin Li #include "v_math.h"
10*412f47f9SXin Li
/* Constant table for the vector sinf kernel.  Kept in one struct so a single
   base pointer (obtained through ptr_barrier in the caller) serves all
   loads.  */
static const struct data
{
  float32x4_t poly[4];
  float32x4_t range_val, inv_pi, shift, pi_1, pi_2, pi_3;
} data = {
  /* Odd-polynomial coefficients for sin(r) on the reduced interval;
     1.886 ulp error. */
  .poly = { V4 (-0x1.555548p-3f), V4 (0x1.110df4p-7f), V4 (-0x1.9f42eap-13f),
            V4 (0x1.5b2e76p-19f) },

  /* pi split into three parts (pi_1 + pi_2 + pi_3 ~= pi) so the range
     reduction r = x - n*pi keeps extra precision via successive FMAs.  */
  .pi_1 = V4 (0x1.921fb6p+1f),
  .pi_2 = V4 (-0x1.777a5cp-24f),
  .pi_3 = V4 (-0x1.ee59dap-49f),

  .inv_pi = V4 (0x1.45f306p-2f),
  /* 1.5 * 2^23: adding then subtracting this rounds to nearest integer in
     single precision (the "shift" rint trick).  */
  .shift = V4 (0x1.8p+23f),
  /* Inputs with |x| >= 2^20 take the scalar special-case path.  */
  .range_val = V4 (0x1p20f)
};
28*412f47f9SXin Li
#if WANT_SIMD_EXCEPT
/* Bounds used for the fenv-safe special-lane test on |x| bit patterns.  */
# define TinyBound v_u32 (0x21000000) /* asuint32(0x1p-61f). */
# define Thresh v_u32 (0x28800000)    /* RangeVal - TinyBound. */
#endif

/* Shorthand for polynomial coefficient i of the table pointed to by d.  */
#define C(i) d->poly[i]
35*412f47f9SXin Li
36*412f47f9SXin Li static float32x4_t VPCS_ATTR NOINLINE
special_case(float32x4_t x,float32x4_t y,uint32x4_t odd,uint32x4_t cmp)37*412f47f9SXin Li special_case (float32x4_t x, float32x4_t y, uint32x4_t odd, uint32x4_t cmp)
38*412f47f9SXin Li {
39*412f47f9SXin Li /* Fall back to scalar code. */
40*412f47f9SXin Li y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
41*412f47f9SXin Li return v_call_f32 (sinf, x, y, cmp);
42*412f47f9SXin Li }
43*412f47f9SXin Li
V_NAME_F1(sin)44*412f47f9SXin Li float32x4_t VPCS_ATTR V_NAME_F1 (sin) (float32x4_t x)
45*412f47f9SXin Li {
46*412f47f9SXin Li const struct data *d = ptr_barrier (&data);
47*412f47f9SXin Li float32x4_t n, r, r2, y;
48*412f47f9SXin Li uint32x4_t odd, cmp;
49*412f47f9SXin Li
50*412f47f9SXin Li #if WANT_SIMD_EXCEPT
51*412f47f9SXin Li uint32x4_t ir = vreinterpretq_u32_f32 (vabsq_f32 (x));
52*412f47f9SXin Li cmp = vcgeq_u32 (vsubq_u32 (ir, TinyBound), Thresh);
53*412f47f9SXin Li /* If fenv exceptions are to be triggered correctly, set any special lanes
54*412f47f9SXin Li to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by
55*412f47f9SXin Li special-case handler later. */
56*412f47f9SXin Li r = vbslq_f32 (cmp, vreinterpretq_f32_u32 (cmp), x);
57*412f47f9SXin Li #else
58*412f47f9SXin Li r = x;
59*412f47f9SXin Li cmp = vcageq_f32 (x, d->range_val);
60*412f47f9SXin Li #endif
61*412f47f9SXin Li
62*412f47f9SXin Li /* n = rint(|x|/pi) */
63*412f47f9SXin Li n = vfmaq_f32 (d->shift, d->inv_pi, r);
64*412f47f9SXin Li odd = vshlq_n_u32 (vreinterpretq_u32_f32 (n), 31);
65*412f47f9SXin Li n = vsubq_f32 (n, d->shift);
66*412f47f9SXin Li
67*412f47f9SXin Li /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2) */
68*412f47f9SXin Li r = vfmsq_f32 (r, d->pi_1, n);
69*412f47f9SXin Li r = vfmsq_f32 (r, d->pi_2, n);
70*412f47f9SXin Li r = vfmsq_f32 (r, d->pi_3, n);
71*412f47f9SXin Li
72*412f47f9SXin Li /* y = sin(r) */
73*412f47f9SXin Li r2 = vmulq_f32 (r, r);
74*412f47f9SXin Li y = vfmaq_f32 (C (2), C (3), r2);
75*412f47f9SXin Li y = vfmaq_f32 (C (1), y, r2);
76*412f47f9SXin Li y = vfmaq_f32 (C (0), y, r2);
77*412f47f9SXin Li y = vfmaq_f32 (r, vmulq_f32 (y, r2), r);
78*412f47f9SXin Li
79*412f47f9SXin Li if (unlikely (v_any_u32 (cmp)))
80*412f47f9SXin Li return special_case (x, y, odd, cmp);
81*412f47f9SXin Li return vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), odd));
82*412f47f9SXin Li }
83