1*412f47f9SXin Li /*
2*412f47f9SXin Li * Single-precision inverse error function (SVE variant).
3*412f47f9SXin Li *
4*412f47f9SXin Li * Copyright (c) 2024, Arm Limited.
5*412f47f9SXin Li * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6*412f47f9SXin Li */
7*412f47f9SXin Li #include "sv_math.h"
8*412f47f9SXin Li #include "pl_sig.h"
9*412f47f9SXin Li #include "pl_test.h"
10*412f47f9SXin Li #include "poly_sve_f32.h"
11*412f47f9SXin Li #include "sv_logf_inline.h"
12*412f47f9SXin Li
const static struct data
{
  /* We use P_N and Q_N to refer to arrays of coefficients, where P_N
     is the coeffs of the numerator in table N of Blair et al, and
     Q_N is the coeffs of the denominator. Coefficients stored in
     interleaved format to support lookup scheme. */
  /* Each row below occupies one 128-bit quadword, loaded with svld1rq and
     indexed with svtbl: even positions hold the 'normal'-interval (table 10)
     coefficients, odd positions the tail-interval (table 29) ones, so a
     per-lane index of 0/1 (or 2/3 for the high halves) selects the right
     polynomial without gather loads.  Do not reorder these fields.  */
  float P10_2, P29_3, Q10_2, Q29_2;
  float P10_0, P29_1, P10_1, P29_2;
  float Q10_0, Q29_0, Q10_1, Q29_1;
  /* P29_0 is applied separately (tail polynomial has one extra term).
     P_50/Q_50 serve the extreme-tail path; tailshift is the argument shift
     (-0.87890625) used on tail lanes.  */
  float P29_0, P_50[6], Q_50[2], tailshift;
  /* Constants consumed by the inlined vector logf used in the extreme
     tail.  */
  struct sv_logf_data logf_tbl;
} data = { .P10_0 = -0x1.a31268p+3,
	   .P10_1 = 0x1.ac9048p+4,
	   .P10_2 = -0x1.293ff6p+3,
	   .P29_0 = -0x1.fc0252p-4,
	   .P29_1 = 0x1.119d44p+0,
	   .P29_2 = -0x1.f59ee2p+0,
	   .P29_3 = 0x1.b13626p-2,
	   .Q10_0 = -0x1.8265eep+3,
	   .Q10_1 = 0x1.ef5eaep+4,
	   .Q10_2 = -0x1.12665p+4,
	   .Q29_0 = -0x1.69952p-4,
	   .Q29_1 = 0x1.c7b7d2p-1,
	   .Q29_2 = -0x1.167d7p+1,
	   .P_50 = { 0x1.3d8948p-3, 0x1.61f9eap+0, 0x1.61c6bcp-1,
		     -0x1.20c9f2p+0, 0x1.5c704cp-1, -0x1.50c6bep-3 },
	   .Q_50 = { 0x1.3d7dacp-3, 0x1.629e5p+0 },
	   .tailshift = -0.87890625,
	   .logf_tbl = SV_LOGF_CONSTANTS };
42*412f47f9SXin Li
43*412f47f9SXin Li static inline svfloat32_t
special(svbool_t pg,svfloat32_t x,const struct data * d)44*412f47f9SXin Li special (svbool_t pg, svfloat32_t x, const struct data *d)
45*412f47f9SXin Li {
46*412f47f9SXin Li svfloat32_t ax = svabs_x (pg, x);
47*412f47f9SXin Li svfloat32_t t = svdivr_x (
48*412f47f9SXin Li pg,
49*412f47f9SXin Li svsqrt_x (pg, svneg_x (pg, sv_logf_inline (pg, svsubr_x (pg, ax, 1),
50*412f47f9SXin Li &d->logf_tbl))),
51*412f47f9SXin Li 1);
52*412f47f9SXin Li svuint32_t sign
53*412f47f9SXin Li = sveor_x (pg, svreinterpret_u32 (ax), svreinterpret_u32 (x));
54*412f47f9SXin Li svfloat32_t ts
55*412f47f9SXin Li = svreinterpret_f32 (svorr_x (pg, sign, svreinterpret_u32 (t)));
56*412f47f9SXin Li svfloat32_t q
57*412f47f9SXin Li = svmla_x (pg, sv_f32 (d->Q_50[0]), svadd_x (pg, t, d->Q_50[1]), t);
58*412f47f9SXin Li return svdiv_x (pg, sv_horner_5_f32_x (pg, t, d->P_50), svmul_x (pg, ts, q));
59*412f47f9SXin Li }
60*412f47f9SXin Li
61*412f47f9SXin Li static inline svfloat32_t
notails(svbool_t pg,svfloat32_t x,const struct data * d)62*412f47f9SXin Li notails (svbool_t pg, svfloat32_t x, const struct data *d)
63*412f47f9SXin Li {
64*412f47f9SXin Li /* Shortcut when no input is in a tail region - no need to gather shift or
65*412f47f9SXin Li coefficients. */
66*412f47f9SXin Li svfloat32_t t = svmad_x (pg, x, x, -0.5625);
67*412f47f9SXin Li svfloat32_t q = svadd_x (pg, t, d->Q10_2);
68*412f47f9SXin Li q = svmad_x (pg, t, q, d->Q10_1);
69*412f47f9SXin Li q = svmad_x (pg, t, q, d->Q10_0);
70*412f47f9SXin Li
71*412f47f9SXin Li svfloat32_t p = svmla_x (pg, sv_f32 (d->P10_1), t, d->P10_2);
72*412f47f9SXin Li p = svmad_x (pg, p, t, d->P10_0);
73*412f47f9SXin Li
74*412f47f9SXin Li return svdiv_x (pg, svmul_x (pg, x, p), q);
75*412f47f9SXin Li }
76*412f47f9SXin Li
/* Vector implementation of Blair et al's rational approximation to inverse
   error function in single-precision. Worst-case error is 4.71 ULP, in the
   tail region:
   _ZGVsMxv_erfinvf(0x1.f84e9ap-1) got 0x1.b8326ap+0
				  want 0x1.b83274p+0. */
svfloat32_t SV_NAME_F1 (erfinv) (svfloat32_t x, svbool_t pg)
{
  const struct data *d = ptr_barrier (&data);

  /* Calculate inverse error using algorithm described in
     J. M. Blair, C. A. Edwards, and J. H. Johnson,
     "Rational Chebyshev approximations for the inverse of the error function",
     Math. Comp. 30, pp. 827--830 (1976).
     https://doi.org/10.1090/S0025-5718-1976-0421040-7. */

  /* Algorithm has 3 intervals:
     - 'Normal' region [-0.75, 0.75]
     - Tail region [0.75, 0.9375] U [-0.9375, -0.75]
     - Extreme tail [-1, -0.9375] U [0.9375, 1]
     Normal and tail are both rational approximation of similar order on
     shifted input - these are typically performed in parallel using gather
     loads to obtain correct coefficients depending on interval. */
  /* svacge compares |x|, so each predicate covers both signs at once.  */
  svbool_t is_tail = svacge (pg, x, 0.75);
  svbool_t extreme_tail = svacge (pg, x, 0.9375);

  if (likely (!svptest_any (pg, is_tail)))
    return notails (pg, x, d);

  /* Select requisite shift depending on interval: polynomial is evaluated on
     x * x - shift.
     Normal shift = 0.5625
     Tail shift = 0.87890625. */
  svfloat32_t t = svmla_x (
      pg, svsel (is_tail, sv_f32 (d->tailshift), sv_f32 (-0.5625)), x, x);

  /* Zeroing dup: idx is 1 on tail lanes, 0 elsewhere - used as the tbl
     index into the interleaved coefficient quadwords (even slot = normal,
     odd slot = tail).  idxhi addresses the second pair in each quadword. */
  svuint32_t idx = svdup_u32_z (is_tail, 1);
  svuint32_t idxhi = svadd_x (pg, idx, 2);

  /* Load coeffs in quadwords and select them according to interval. */
  svfloat32_t pqhi = svld1rq (svptrue_b32 (), &d->P10_2);
  svfloat32_t plo = svld1rq (svptrue_b32 (), &d->P10_0);
  svfloat32_t qlo = svld1rq (svptrue_b32 (), &d->Q10_0);

  svfloat32_t p2 = svtbl (pqhi, idx);
  svfloat32_t p1 = svtbl (plo, idxhi);
  svfloat32_t p0 = svtbl (plo, idx);
  svfloat32_t q0 = svtbl (qlo, idx);
  svfloat32_t q1 = svtbl (qlo, idxhi);
  svfloat32_t q2 = svtbl (pqhi, idxhi);

  /* Numerator polynomial by Horner on the per-lane coefficients. */
  svfloat32_t p = svmla_x (pg, p1, p2, t);
  p = svmla_x (pg, p0, p, t);
  /* Tail polynomial has higher order - merge with normal lanes. */
  /* _m predication: only tail lanes get the extra p * t + P29_0 step;
     normal lanes keep p unchanged. */
  p = svmad_m (is_tail, p, t, d->P29_0);
  svfloat32_t y = svmul_x (pg, x, p);

  /* Least significant term of both Q polynomials is 1, so no need to generate
     it. */
  svfloat32_t q = svadd_x (pg, t, q2);
  q = svmla_x (pg, q1, q, t);
  q = svmla_x (pg, q0, q, t);

  /* Extreme-tail lanes are handled by the slow path and merged back in;
     all other lanes take the rational quotient. */
  if (unlikely (svptest_any (pg, extreme_tail)))
    return svsel (extreme_tail, special (extreme_tail, x, d),
		  svdiv_x (pg, y, q));
  return svdiv_x (pg, y, q);
}
144*412f47f9SXin Li
/* Test harness hooks: signature registration, observed worst-case ULP bound,
   and test intervals swept at several control values over both signs. */
PL_SIG (SV, F, 1, erfinv, -0.99, 0.99)
PL_TEST_ULP (SV_NAME_F1 (erfinv), 4.09)
#define TEST_INTERVAL(lo, hi, n)                                              \
  PL_TEST_INTERVAL_C (SV_NAME_F1 (erfinv), lo, hi, n, 0.5)                    \
  PL_TEST_INTERVAL_C (SV_NAME_F1 (erfinv), lo, hi, n, 0.8)                    \
  PL_TEST_INTERVAL_C (SV_NAME_F1 (erfinv), lo, hi, n, 0.95)
TEST_INTERVAL (0, 1, 40000)
TEST_INTERVAL (-0, -1, 40000)
153