/*
 * Single-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float32x4_t max, shift, third;
#if WANT_SIMD_EXCEPT
  float32x4_t tiny_bound, scale_minus_one;
#endif
} data = {
  .max = V4 (3.9375), /* 4 - 8/128. */
  .shift = V4 (0x1p16f),
  .third = V4 (0x1.555556p-2f), /* 1/3. */
#if WANT_SIMD_EXCEPT
  .tiny_bound = V4 (0x1p-62f),
  .scale_minus_one = V4 (0x1.06eba8p-3f), /* scale - 1.0. */
#endif
};

#define AbsMask 0x7fffffff

struct entry
{
  float32x4_t erf;
  float32x4_t scale;
};

static inline struct entry
lookup (uint32x4_t i)
{
  struct entry e;
  float32x2_t t0 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 0)].erf);
  float32x2_t t1 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 1)].erf);
  float32x2_t t2 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 2)].erf);
  float32x2_t t3 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 3)].erf);
  float32x4_t e1 = vcombine_f32 (t0, t1);
  float32x4_t e2 = vcombine_f32 (t2, t3);
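  /* Each 64-bit load above fetches a contiguous { erf(r), scale } pair;
     vuzp1q/vuzp2q below de-interleave the pairs into one vector of erf
     values and one vector of scales. */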
  e.erf = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}

/* Single-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

     erf(x) ~ erf(r) + scale * d * [1 - r * d - 1/3 * d^2]

   Values of erf(r) and scale are read from lookup tables.
   For |x| > 3.9375, erf(|x|) rounds to 1.0f.

   Maximum error: 1.93 ULP
     _ZGVnN4v_erff(0x1.c373e6p-9) got 0x1.fd686cp-9
                                 want 0x1.fd6868p-9. */
float32x4_t VPCS_ATTR V_NAME_F1 (erf) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62. */
  uint32x4_t cmp = vcaltq_f32 (x, dat->tiny_bound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to
     allow the special-case handler to fix those lanes later. This is only
     necessary if fenv exceptions are to be triggered correctly. */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  float32x4_t a = vabsq_f32 (x);
  uint32x4_t a_gt_max = vcgtq_f32 (a, dat->max);

  /* Lookup erf(r) and scale(r) in tables, e.g. erf(r) is 0 and scale is
     2/sqrt(pi) when x is reduced to r = 0. */
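  /* Adding 2^16 rounds |x| to the nearest multiple of 1/128, since
     ulp(2^16) = 1/128 in single precision. The low mantissa bits of z then
     hold i = round(|x| * 128), recovered by subtracting the bit pattern of
     the shift; i is clamped to 512, the last table entry. */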
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  uint32x4_t i
      = vsubq_u32 (vreinterpretq_u32_f32 (z), vreinterpretq_u32_f32 (shift));
  i = vminq_u32 (i, v_u32 (512));
  struct entry e = lookup (i);

  float32x4_t r = vsubq_f32 (z, shift);

  /* erf(x) ~ erf(r) + scale * d * (1 - r * d - 1/3 * d^2). */
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
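  /* Factor the polynomial as d - d2 * (r + d/3): one fma forms r + d/3, one
     fms applies it, and a final fma combines the result with the table
     values. */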
  float32x4_t y = vfmaq_f32 (r, dat->third, d);
  y = vfmaq_f32 (e.erf, e.scale, vfmsq_f32 (d, d2, y));

  /* For |x| > 3.9375, erf(|x|) rounds to 1.0f; this select also fixes
     |x| = inf, for which d above is NaN. */
  y = vbslq_f32 (a_gt_max, v_f32 (1.0f), y);

  /* Copy sign. */
  y = vbslq_f32 (v_u32 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
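  /* For |x| < 2^-62, erf(x) rounds to (2/sqrt(pi)) * x. With
     scale = 2/sqrt(pi) (the scale table value at r = 0), this is evaluated
     as x + x * (scale - 1) with a single fma on the saved lanes. */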
  if (unlikely (v_any_u32 (cmp)))
    return vbslq_f32 (cmp, vfmaq_f32 (xm, dat->scale_minus_one, xm), y);
#endif
  return y;
}
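
/* The reduction and polynomial above are the vector form of the following
   scalar sketch. It is illustrative only and not part of the build: tab is
   a hypothetical stand-in for __erff_data.tab, holding { erf(i/128),
   2/sqrt(pi) * exp(-(i/128)^2) } for i = 0..512, and its half-up rounding
   differs from the round-to-nearest-even shift trick only on exact halfway
   points.

     #include <math.h>

     static float
     erff_sketch (float x)
     {
       float a = fabsf (x);
       if (a > 3.9375f)
         return copysignf (1.0f, x);      // erf(|x|) rounds to 1; covers inf
       int i = (int) (a * 128.0f + 0.5f); // round |x| to the 1/128 grid
       float r = i * 0x1p-7f;             // r = i/128
       float d = a - r;                   // |d| <= 1/256
       float y = tab[i].erf
                 + tab[i].scale * d * (1.0f - r * d - d * d / 3.0f);
       return copysignf (y, x);           // erf is odd
     }
*/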

PL_SIG (V, F, 1, erf, -4.0, 4.0)
PL_TEST_ULP (V_NAME_F1 (erf), 1.43)
PL_TEST_EXPECT_FENV (V_NAME_F1 (erf), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, 3.9375, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 3.9375, inf, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, inf, 40000)