/*
 * Single-precision inverse error function (AdvSIMD variant).
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */
#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"
#include "poly_advsimd_f32.h"
#include "v_logf_inline.h"

const static struct data
{
  /* We use P_N and Q_N to refer to arrays of coefficients, where P_N is the
     coeffs of the numerator in table N of Blair et al, and Q_N is the coeffs
     of the denominator. Coefficients are stored in various interleaved
     formats to allow for table-based (vector-to-vector) lookup.

     Plo is the first two coefficients of P_10 and P_29 interleaved.
     PQ is the third coeff of P_10 and the first of Q_29 interleaved.
     Qhi is the second and third coeffs of Q_29 interleaved.
     P29_3 is a homogeneous vector with the fourth coeff of P_29.

     P_10 and Q_10 are also stored in homogeneous vectors to allow better
     memory access when no lanes are in a tail region. */
  float Plo[4], PQ[4], Qhi[4];
  float32x4_t P29_3, tailshift;
  float32x4_t P_50[6], Q_50[2];
  float32x4_t P_10[3], Q_10[3];
  uint8_t idxhi[16], idxlo[16];
  struct v_logf_data logf_tbl;
} data = {
  .idxlo = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 },
  .idxhi = { 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11 },
  .P29_3 = V4 (0x1.b13626p-2),
  .tailshift = V4 (-0.87890625),
  .Plo = { -0x1.a31268p+3, -0x1.fc0252p-4, 0x1.ac9048p+4, 0x1.119d44p+0 },
  .PQ = { -0x1.293ff6p+3, -0x1.f59ee2p+0, -0x1.8265eep+3, -0x1.69952p-4 },
  .Qhi = { 0x1.ef5eaep+4, 0x1.c7b7d2p-1, -0x1.12665p+4, -0x1.167d7p+1 },
  .P_50 = { V4 (0x1.3d8948p-3), V4 (0x1.61f9eap+0), V4 (0x1.61c6bcp-1),
            V4 (-0x1.20c9f2p+0), V4 (0x1.5c704cp-1), V4 (-0x1.50c6bep-3) },
  .Q_50 = { V4 (0x1.3d7dacp-3), V4 (0x1.629e5p+0) },
  .P_10 = { V4 (-0x1.a31268p+3), V4 (0x1.ac9048p+4), V4 (-0x1.293ff6p+3) },
  .Q_10 = { V4 (-0x1.8265eep+3), V4 (0x1.ef5eaep+4), V4 (-0x1.12665p+4) },
  .logf_tbl = V_LOGF_CONSTANTS
};

static inline float32x4_t
special (float32x4_t x, const struct data *d)
{
  /* Note erfinvf(inf) should return NaN, and erfinvf(1) should return Inf.
     By using log here, instead of log1p, we return finite values for both
     these inputs, and values outside [-1, 1]. This is non-compliant, but is an
     acceptable optimisation at Ofast. To get correct behaviour for all finite
     values use the log1pf_inline helper on -abs(x) - note that erfinvf(inf)
     will still be finite. */
  float32x4_t t = vdivq_f32 (
      v_f32 (1), vsqrtq_f32 (vnegq_f32 (v_logf_inline (
                     vsubq_f32 (v_f32 (1), vabsq_f32 (x)), &d->logf_tbl))));
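  /* Select the magnitude bits of t and the sign bit of x, i.e. copysign (t, x),
     so the quotient below has the correct sign for negative inputs. */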
  float32x4_t ts = vbslq_f32 (v_u32 (0x7fffffff), t, x);
  float32x4_t q = vfmaq_f32 (d->Q_50[0], vaddq_f32 (t, d->Q_50[1]), t);
  return vdivq_f32 (v_horner_5_f32 (t, d->P_50), vmulq_f32 (ts, q));
}

static inline float32x4_t
notails (float32x4_t x, const struct data *d)
{
  /* Shortcut when no input is in a tail region - no need to gather shift or
     coefficients. */
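  /* Normal-interval approximation only: evaluate x * P_10(t) / Q_10(t) on
     t = x * x - 0.5625, where P_10 is quadratic and Q_10 is a monic cubic. */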
  float32x4_t t = vfmaq_f32 (v_f32 (-0.5625), x, x);
  float32x4_t q = vaddq_f32 (t, d->Q_10[2]);
  q = vfmaq_f32 (d->Q_10[1], t, q);
  q = vfmaq_f32 (d->Q_10[0], t, q);

  return vdivq_f32 (vmulq_f32 (x, v_horner_2_f32 (t, d->P_10)), q);
}

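/* Byte-wise table lookup: each 32-bit lane of the result selects one 32-bit
   element of tbl, as directed by idx (see the index calculation in the main
   routine below). */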
static inline float32x4_t
lookup (float32x4_t tbl, uint8x16_t idx)
{
  return vreinterpretq_f32_u8 (vqtbl1q_u8 (vreinterpretq_u8_f32 (tbl), idx));
}

/* Vector implementation of Blair et al's rational approximation to inverse
   error function in single-precision. Worst-case error is 4.98 ULP, in the
   tail region:
   _ZGVnN4v_erfinvf(0x1.f7dbeep-1) got 0x1.b4793p+0
				  want 0x1.b4793ap+0 . */
float32x4_t VPCS_ATTR V_NAME_F1 (erfinv) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);

  /* Calculate the inverse error function using the algorithm described in
     J. M. Blair, C. A. Edwards, and J. H. Johnson,
     "Rational Chebyshev approximations for the inverse of the error
     function", Math. Comp. 30, pp. 827--830 (1976).
     https://doi.org/10.1090/S0025-5718-1976-0421040-7.

     The algorithm has 3 intervals:
     - 'Normal' region [-0.75, 0.75]
     - Tail region [0.75, 0.9375] U [-0.9375, -0.75]
     - Extreme tail [-1, -0.9375] U [0.9375, 1]
     Normal and tail are both rational approximations of similar order on
     shifted input - these are typically performed in parallel using gather
     loads to obtain the correct coefficients depending on interval. */
  uint32x4_t is_tail = vcageq_f32 (x, v_f32 (0.75));
  uint32x4_t extreme_tail = vcageq_f32 (x, v_f32 (0.9375));

  if (unlikely (!v_any_u32 (is_tail)))
    /* Shortcut when all lanes are in [-0.75, 0.75] - can avoid having to
       gather coefficients. If input is uniform in [-1, 1] then the likelihood
       of this is 0.75^4 ~= 0.31. */
    return notails (x, d);

  /* Select requisite shift depending on interval: polynomial is evaluated on
     x * x - shift.
     Normal shift = 0.5625
     Tail shift = 0.87890625. */
  float32x4_t t
      = vfmaq_f32 (vbslq_f32 (is_tail, d->tailshift, v_f32 (-0.5625)), x, x);

  /* Calculate indices for tbl: tbl is byte-wise, so:
     [0, 1, 2, 3, 4, 5, 6, ....] copies the vector.
     Add 4 * i to a group of 4 lanes to copy 32-bit lane i. Each vector stores
     two pairs of coeffs, so we need two idx vectors - one for each pair. */
  uint8x16_t off = vandq_u8 (vreinterpretq_u8_u32 (is_tail), vdupq_n_u8 (4));
  uint8x16_t idx_lo = vaddq_u8 (vld1q_u8 (d->idxlo), off);
  uint8x16_t idx_hi = vaddq_u8 (vld1q_u8 (d->idxhi), off);

  /* Load the tables. */
  float32x4_t plo = vld1q_f32 (d->Plo);
  float32x4_t pq = vld1q_f32 (d->PQ);
  float32x4_t qhi = vld1q_f32 (d->Qhi);

  /* Do the lookup (and calculate p3 by masking non-tail lanes). */
  float32x4_t p3 = vreinterpretq_f32_u32 (
      vandq_u32 (is_tail, vreinterpretq_u32_f32 (d->P29_3)));
  float32x4_t p0 = lookup (plo, idx_lo), p1 = lookup (plo, idx_hi),
	      p2 = lookup (pq, idx_lo), q0 = lookup (pq, idx_hi),
	      q1 = lookup (qhi, idx_lo), q2 = lookup (qhi, idx_hi);

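  /* Evaluate the numerator (scaled by x) and the monic denominator with
     Horner's scheme on the shifted input t. */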
  float32x4_t p = vfmaq_f32 (p2, p3, t);
  p = vfmaq_f32 (p1, p, t);
  p = vfmaq_f32 (p0, p, t);
  p = vmulq_f32 (x, p);

  float32x4_t q = vfmaq_f32 (q1, vaddq_f32 (q2, t), t);
  q = vfmaq_f32 (q0, q, t);

  if (unlikely (v_any_u32 (extreme_tail)))
    /* At least one lane is in the extreme tail - if input is uniform in
       [-1, 1] the likelihood of this is ~0.23. */
    return vbslq_f32 (extreme_tail, special (x, d), vdivq_f32 (p, q));

  return vdivq_f32 (p, q);
}

PL_SIG (V, F, 1, erfinv, -0.99, 0.99)
PL_TEST_ULP (V_NAME_F1 (erfinv), 4.49)
/* Test with control lane in each interval. */
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.5)
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.8)
PL_TEST_SYM_INTERVAL_C (V_NAME_F1 (erfinv), 0, 0x1.fffffep-1, 40000, 0.95)