xref: /aosp_15_r20/external/arm-optimized-routines/pl/math/v_atanf_3u.c (revision 412f47f9e737e10ed5cc46ec6a8d7fa2264f8a14)
1*412f47f9SXin Li /*
2*412f47f9SXin Li  * Single-precision vector atan(x) function.
3*412f47f9SXin Li  *
4*412f47f9SXin Li  * Copyright (c) 2021-2023, Arm Limited.
5*412f47f9SXin Li  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6*412f47f9SXin Li  */
7*412f47f9SXin Li 
8*412f47f9SXin Li #include "v_math.h"
9*412f47f9SXin Li #include "pl_sig.h"
10*412f47f9SXin Li #include "pl_test.h"
11*412f47f9SXin Li #include "poly_advsimd_f32.h"
12*412f47f9SXin Li 
static const struct data
{
  /* Even-degree polynomial coefficients for P, evaluated at z^2 (see below).  */
  float32x4_t poly[8];
  /* Shift added to the result for lanes reduced via z = -1/x (|x| > 1).  */
  float32x4_t pi_over_2;
} data = {
  /* Coefficients of polynomial P such that atan(x)~x+x*P(x^2) on
     [2**-128, 1.0].
     Generated using fpminimax between FLT_MIN and 1.  */
  .poly = { V4 (-0x1.55555p-2f), V4 (0x1.99935ep-3f), V4 (-0x1.24051ep-3f),
	    V4 (0x1.bd7368p-4f), V4 (-0x1.491f0ep-4f), V4 (0x1.93a2c0p-5f),
	    V4 (-0x1.4c3c60p-6f), V4 (0x1.01fd88p-8f) },
  .pi_over_2 = V4 (0x1.921fb6p+0f),
};
26*412f47f9SXin Li 
/* Mask selecting the sign bit of a single-precision lane.  */
#define SignMask v_u32 (0x80000000)

/* Shorthand for the i-th coefficient vector of the polynomial table.  */
#define P(i) d->poly[i]

/* Unsigned bit-pattern bounds used (only when WANT_SIMD_EXCEPT) to route
   lanes with very small or very large |x| to the scalar fallback, since the
   vector path does not set fenv flags correctly for them.  */
#define TinyBound 0x30800000 /* asuint(0x1p-30).  */
#define BigBound 0x4e800000  /* asuint(0x1p30).  */
33*412f47f9SXin Li 
#if WANT_SIMD_EXCEPT
/* Scalar fallback: for lanes flagged in SPECIAL, replace y with the result of
   the scalar atanf on x, so fenv exception flags are raised as required.  */
static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t special)
{
  return v_call_f32 (atanf, x, y, special);
}
#endif
41*412f47f9SXin Li 
/* Fast implementation of vector atanf based on
   atan(x) ~ shift + z + z^3 * P(z^2) with reduction to [0,1]
   using z=-1/x and shift = pi/2. Maximum observed error is 2.9ulps:
   _ZGVnN4v_atanf (0x1.0468f6p+0) got 0x1.967f06p-1 want 0x1.967fp-1.  */
float32x4_t VPCS_ATTR V_NAME_F1 (atan) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* Small cases, infs and nans are supported by our approximation technique,
     but do not set fenv flags correctly. Only trigger special case if we need
     fenv.  */
  uint32x4_t ix = vreinterpretq_u32_f32 (x);
  uint32x4_t sign = vandq_u32 (ix, SignMask);

#if WANT_SIMD_EXCEPT
  /* NOTE(review): 0x7ff00000 is coarser than a full abs mask (0x7fffffff) -
     it keeps only the exponent and top mantissa bits - but TinyBound and
     BigBound are both multiples of 0x00100000, so the range test below is
     unaffected at that granularity.  */
  uint32x4_t ia = vandq_u32 (ix, v_u32 (0x7ff00000));
  /* Single unsigned compare catches both ia < TinyBound (wraps to a huge
     value) and ia > BigBound.  */
  uint32x4_t special = vcgtq_u32 (vsubq_u32 (ia, v_u32 (TinyBound)),
				  v_u32 (BigBound - TinyBound));
  /* If any lane is special, fall back to the scalar routine for all lanes.  */
  if (unlikely (v_any_u32 (special)))
    return special_case (x, x, v_u32 (-1));
#endif

  /* Argument reduction:
     y := arctan(x) for x < 1
     y := pi/2 + arctan(-1/x) for x > 1
     Hence, use z=-1/a if x>=1, otherwise z=a.  */
  uint32x4_t red = vcagtq_f32 (x, v_f32 (1.0));
  /* Avoid dependency in abs(x) in division (and comparison).  */
  float32x4_t z = vbslq_f32 (red, vdivq_f32 (v_f32 (1.0f), x), x);
  /* shift = pi/2 on reduced lanes, +0.0 elsewhere (bit-masked, no select).  */
  float32x4_t shift = vreinterpretq_f32_u32 (
      vandq_u32 (red, vreinterpretq_u32_f32 (d->pi_over_2)));
  /* Use absolute value only when needed (odd powers of z).
     az keeps z's magnitude but forces the sign bit set on reduced lanes
     (implementing z = -1/|x|) and clear otherwise; the original sign of x is
     restored by the final xor with SIGN.  */
  float32x4_t az = vbslq_f32 (
      SignMask, vreinterpretq_f32_u32 (vandq_u32 (SignMask, red)), z);

  /* Calculate the polynomial approximation.
     Use 2-level Estrin scheme for P(z^2) with deg(P)=7. However,
     a standard implementation using z8 creates spurious underflow
     in the very last fma (when z^8 is small enough).
     Therefore, we split the last fma into a mul and an fma.
     Horner and single-level Estrin have higher errors that exceed
     threshold.  */
  float32x4_t z2 = vmulq_f32 (z, z);
  float32x4_t z4 = vmulq_f32 (z2, z2);

  /* P(z2) = lo(z2) + z4 * (z4 * hi(z2)), with lo/hi each a pairwise cubic in
     z2 over coefficients 0-3 and 4-7 respectively.  */
  float32x4_t y = vfmaq_f32 (
      v_pairwise_poly_3_f32 (z2, z4, d->poly), z4,
      vmulq_f32 (z4, v_pairwise_poly_3_f32 (z2, z4, d->poly + 4)));

  /* y = shift + z * P(z^2).  */
  y = vaddq_f32 (vfmaq_f32 (az, y, vmulq_f32 (z2, az)), shift);

  /* y = atan(x) if x>0, -atan(-x) otherwise.  */
  y = vreinterpretq_f32_u32 (veorq_u32 (vreinterpretq_u32_f32 (y), sign));

  return y;
}
100*412f47f9SXin Li 
/* Test-harness hooks: signature registration, 2.5 ULP acceptance threshold,
   fenv expectation tied to WANT_SIMD_EXCEPT, and symmetric test intervals
   covering the tiny, core, reduced and huge input ranges.  */
PL_SIG (V, F, 1, atan, -10.0, 10.0)
PL_TEST_ULP (V_NAME_F1 (atan), 2.5)
PL_TEST_EXPECT_FENV (V_NAME_F1 (atan), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0, 0x1p-30, 5000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0x1p-30, 1, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 1, 0x1p30, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (atan), 0x1p30, inf, 1000)
108