/*
 * Double-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float64x2_t third;
  float64x2_t tenth, two_over_five, two_over_fifteen;
  float64x2_t two_over_nine, two_over_fortyfive;
  float64x2_t max, shift;
#if WANT_SIMD_EXCEPT
  float64x2_t tiny_bound, huge_bound, scale_minus_one;
#endif
} data = {
  .third = V2 (0x1.5555555555556p-2), /* used to compute 2/3 and 1/6 too. */
  .two_over_fifteen = V2 (0x1.1111111111111p-3),
  .tenth = V2 (-0x1.999999999999ap-4),
  .two_over_five = V2 (-0x1.999999999999ap-2),
  .two_over_nine = V2 (-0x1.c71c71c71c71cp-3),
  .two_over_fortyfive = V2 (0x1.6c16c16c16c17p-5),
  .max = V2 (5.9921875), /* 6 - 1/128. */
  .shift = V2 (0x1p45),
#if WANT_SIMD_EXCEPT
  .huge_bound = V2 (0x1p205),
  .tiny_bound = V2 (0x1p-226),
  .scale_minus_one = V2 (0x1.06eba8214db69p-3), /* 2/sqrt(pi) - 1.0. */
#endif
};
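/* Note that tenth, two_over_five and two_over_nine are stored negated: the
   signs of the series expansion are folded into the polynomial coefficients
   computed in the routine below. */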

#define AbsMask 0x7fffffffffffffff

struct entry
{
  float64x2_t erf;
  float64x2_t scale;
};

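/* Gather the two per-lane table entries and de-interleave them: each entry of
   __erf_data.tab stores erf(r) followed by scale(r), so a single vld1q_f64
   loads both values for one lane, and vuzp1q/vuzp2q split the pair into
   separate erf and scale vectors. */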
static inline struct entry
lookup (uint64x2_t i)
{
  struct entry e;
  float64x2_t e1 = vld1q_f64 (&__erf_data.tab[vgetq_lane_u64 (i, 0)].erf),
	      e2 = vld1q_f64 (&__erf_data.tab[vgetq_lane_u64 (i, 1)].erf);
  e.erf = vuzp1q_f64 (e1, e2);
  e.scale = vuzp2q_f64 (e1, e2);
  return e;
}

/* Double-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [
       + 1
       - r d
       + 1/3 (2 r^2 - 1) d^2
       - 1/6 (r (2 r^2 - 3)) d^3
       + 1/30 (4 r^4 - 12 r^2 + 3) d^4
       - 1/90 (r (4 r^4 - 20 r^2 + 15)) d^5
   ]

   Maximum measured error: 2.29 ULP
   V_NAME_D1 (erf)(-0x1.00003c924e5d1p-8) got -0x1.20dd59132ebadp-8
					  want -0x1.20dd59132ebafp-8. */
float64x2_t VPCS_ATTR V_NAME_D1 (erf) (float64x2_t x)
{
  const struct data *dat = ptr_barrier (&data);

  float64x2_t a = vabsq_f64 (x);
  /* Reciprocal conditions that do not catch NaNs, so they can be used in BSLs
     to return expected results. */
  uint64x2_t a_le_max = vcleq_f64 (a, dat->max);
  uint64x2_t a_gt_max = vcgtq_f64 (a, dat->max);

#if WANT_SIMD_EXCEPT
  /* |x| huge or tiny. */
  uint64x2_t cmp1 = vcgtq_f64 (a, dat->huge_bound);
  uint64x2_t cmp2 = vcltq_f64 (a, dat->tiny_bound);
  uint64x2_t cmp = vorrq_u64 (cmp1, cmp2);
  /* If any lanes are special, mask them with 1 for tiny x or 8 for huge
     values; x itself is left untouched so the special-case fix-up at the end
     can correct those lanes. This is only necessary if fenv exceptions are to
     be triggered correctly. */
  if (unlikely (v_any_u64 (cmp)))
    {
      a = vbslq_f64 (cmp1, v_f64 (8.0), a);
      a = vbslq_f64 (cmp2, v_f64 (1.0), a);
    }
#endif

  /* Set r to multiple of 1/128 nearest to |x|. */
  float64x2_t shift = dat->shift;
  float64x2_t z = vaddq_f64 (a, shift);
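  /* With round-to-nearest, adding shift = 0x1p45 (whose ULP is 2^-7 = 1/128)
     rounds |x| to the nearest multiple of 1/128 and leaves the integer
     round(|x| * 128) in the low mantissa bits of z, so the bit-pattern
     subtraction below recovers the table index directly. */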

  /* Lookup erf(r) and scale(r) in table, without shortcut for small values,
     but with saturated indices for large values and NaNs in order to avoid
     segfault. */
  uint64x2_t i
      = vsubq_u64 (vreinterpretq_u64_f64 (z), vreinterpretq_u64_f64 (shift));
  i = vbslq_u64 (a_le_max, i, v_u64 (768));
  struct entry e = lookup (i);

  float64x2_t r = vsubq_f64 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * poly (r, d). */
  float64x2_t d = vsubq_f64 (a, r);
  float64x2_t d2 = vmulq_f64 (d, d);
  float64x2_t r2 = vmulq_f64 (r, r);

  /* poly (d, r) = 1 - p1(r) * d - p2(r) * d^2 - ... - p5(r) * d^5. */
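  /* The subtractions are carried out by the final d - d^2 * (...) step, so
     the coefficients evaluated here are the negations of those in the
     expansion above:
       p1 = r
       p2 = (1 - 2 r^2) / 3
       p3 = r (2 r^2 - 3) / 6
       p4 = -(4 r^4 - 12 r^2 + 3) / 30
       p5 = r (4 r^4 - 20 r^2 + 15) / 90.  */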
  float64x2_t p1 = r;
  float64x2_t p2
      = vfmsq_f64 (dat->third, r2, vaddq_f64 (dat->third, dat->third));
  float64x2_t p3 = vmulq_f64 (r, vfmaq_f64 (v_f64 (-0.5), r2, dat->third));
  float64x2_t p4 = vfmaq_f64 (dat->two_over_five, r2, dat->two_over_fifteen);
  p4 = vfmsq_f64 (dat->tenth, r2, p4);
  float64x2_t p5 = vfmaq_f64 (dat->two_over_nine, r2, dat->two_over_fortyfive);
  p5 = vmulq_f64 (r, vfmaq_f64 (vmulq_f64 (v_f64 (0.5), dat->third), r2, p5));

  float64x2_t p34 = vfmaq_f64 (p3, d, p4);
  float64x2_t p12 = vfmaq_f64 (p1, d, p2);
  float64x2_t y = vfmaq_f64 (p34, d2, p5);
  y = vfmaq_f64 (p12, d2, y);

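  /* Combine as erf(r) + scale * (d - d^2 * y), where the pairwise steps above
     give y = p1 + p2 d + p3 d^2 + p4 d^3 + p5 d^4. */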
  y = vfmaq_f64 (e.erf, e.scale, vfmsq_f64 (d, d2, y));

  /* Lanes with |x| > max (including inf) return 1; NaN lanes fail this
     comparison and keep the NaN propagated through the computation. */
  y = vbslq_f64 (a_gt_max, v_f64 (1.0), y);

  /* Copy sign. */
  y = vbslq_f64 (v_u64 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
  if (unlikely (v_any_u64 (cmp2)))
    {
      /* Neutralise huge values of x before fixing small values. */
      x = vbslq_f64 (cmp1, v_f64 (1.0), x);
      /* Fix tiny values that trigger spurious underflow. */
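      /* For |x| < tiny_bound, erf(x) ~ 2/sqrt(pi) * x; it is evaluated as
	 x + (2/sqrt(pi) - 1) * x so that the leading term x is exact and only
	 the small correction is subject to rounding. */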
      return vbslq_f64 (cmp2, vfmaq_f64 (x, dat->scale_minus_one, x), y);
    }
#endif
  return y;
}

PL_SIG (V, D, 1, erf, -6.0, 6.0)
PL_TEST_ULP (V_NAME_D1 (erf), 1.79)
PL_TEST_EXPECT_FENV (V_NAME_D1 (erf), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, 5.9921875, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 5.9921875, inf, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_D1 (erf), 0, inf, 40000)