/*
 * Double-precision vector pow function.
 *
 * Copyright (c) 2020-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

/* Defines parameters of the approximation and scalar fallback. */
#include "finite_pow.h"

#define VecSmallExp v_u64 (SmallExp)
#define VecThresExp v_u64 (ThresExp)

#define VecSmallPowX v_u64 (SmallPowX)
#define VecThresPowX v_u64 (ThresPowX)
#define VecSmallPowY v_u64 (SmallPowY)
#define VecThresPowY v_u64 (ThresPowY)
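
/* Each (Small, Thres) pair above supports a one-compare range check:
   evaluating (top - Small) >= Thres with unsigned wrap-around flags both
   top < Small and top >= Small + Thres, as used in v_exp_inline and in the
   WANT_SIMD_EXCEPT classification below.  */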

static const struct data
{
  float64x2_t log_poly[6];
  float64x2_t exp_poly[3];
  float64x2_t ln2_hi, ln2_lo;
  float64x2_t shift, inv_ln2_n, ln2_hi_n, ln2_lo_n, small_powx;
  uint64x2_t inf;
} data = {
  /* Coefficients copied from v_pow_log_data.c
     relative error: 0x1.11922ap-70 in [-0x1.6bp-8, 0x1.6bp-8]
     Coefficients are scaled to match the scaling during evaluation. */
  .log_poly
  = { V2 (0x1.555555555556p-2 * -2), V2 (-0x1.0000000000006p-2 * -2),
      V2 (0x1.999999959554ep-3 * 4), V2 (-0x1.555555529a47ap-3 * 4),
      V2 (0x1.2495b9b4845e9p-3 * -8), V2 (-0x1.0002b8b263fc3p-3 * -8) },
  .ln2_hi = V2 (0x1.62e42fefa3800p-1),
  .ln2_lo = V2 (0x1.ef35793c76730p-45),
  /* Polynomial coefficients: abs error: 1.43*2^-58, ulp error: 0.549
     (0.550 without fma) if |x| < ln2/512. */
  .exp_poly = { V2 (0x1.fffffffffffd4p-2), V2 (0x1.5555571d6ef9p-3),
                V2 (0x1.5555576a5adcep-5) },
  .shift = V2 (0x1.8p52), /* Round to nearest int without intrinsics. */
  .inv_ln2_n = V2 (0x1.71547652b82fep8), /* N/ln2. */
  .ln2_hi_n = V2 (0x1.62e42fefc0000p-9), /* ln2/N. */
  .ln2_lo_n = V2 (-0x1.c610ca86c3899p-45),
  .small_powx = V2 (0x1p-126),
  .inf = V2 (0x7ff0000000000000)
};

#define A(i) data.log_poly[i]
#define C(i) data.exp_poly[i]

/* This version implements an algorithm close to scalar pow but
   - does not implement the trick in exp's special-case subroutine to avoid
     double-rounding,
   - does not use a tail in the exponential core computation,
   - and pow's exp polynomial order and table bits might differ.

   Maximum measured error is 1.04 ULPs:
   _ZGVnN2vv_pow(0x1.024a3e56b3c3p-136, 0x1.87910248b58acp-13)
     got 0x1.f71162f473251p-1
    want 0x1.f71162f473252p-1. */
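/* Gather two table entries.  The index argument is tmp = ix - Off from
   v_log_inline, so the shift and mask below keep the top
   V_POW_LOG_TABLE_BITS mantissa bits, i.e. they select which of the N_LOG
   subintervals of [OFF, 2*OFF) the normalized significand z falls in.  */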
static inline float64x2_t
v_masked_lookup_f64 (const double *table, uint64x2_t i)
{
  return (float64x2_t){
    table[(i[0] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)],
    table[(i[1] >> (52 - V_POW_LOG_TABLE_BITS)) & (N_LOG - 1)]
  };
}

/* Compute y+TAIL = log(x) where the rounded result is y and TAIL has about
   15 bits of additional precision.  IX is the bit representation of x, but
   normalized in the subnormal range using the sign bit for the exponent. */
static inline float64x2_t
v_log_inline (uint64x2_t ix, float64x2_t *tail, const struct data *d)
{
  /* x = 2^k z; where z is in range [OFF,2*OFF) and exact.
     The range is split into N subintervals.
     The ith subinterval contains z and c is near its center. */
  uint64x2_t tmp = vsubq_u64 (ix, v_u64 (Off));
  int64x2_t k
      = vshrq_n_s64 (vreinterpretq_s64_u64 (tmp), 52); /* arithmetic shift. */
  uint64x2_t iz = vsubq_u64 (ix, vandq_u64 (tmp, v_u64 (0xfffULL << 52)));
  float64x2_t z = vreinterpretq_f64_u64 (iz);
  float64x2_t kd = vcvtq_f64_s64 (k);
  /* log(x) = k*Ln2 + log(c) + log1p(z/c-1). */
  float64x2_t invc = v_masked_lookup_f64 (__v_pow_log_data.invc, tmp);
  float64x2_t logc = v_masked_lookup_f64 (__v_pow_log_data.logc, tmp);
  float64x2_t logctail = v_masked_lookup_f64 (__v_pow_log_data.logctail, tmp);
  /* Note: 1/c is j/N or j/N/2 where j is an integer in [N,2N) and
     |z/c - 1| < 1/N, so r = z/c - 1 is exactly representable. */
  float64x2_t r = vfmaq_f64 (v_f64 (-1.0), z, invc);
  /* k*Ln2 + log(c) + r. */
  float64x2_t t1 = vfmaq_f64 (logc, kd, d->ln2_hi);
  float64x2_t t2 = vaddq_f64 (t1, r);
  float64x2_t lo1 = vfmaq_f64 (logctail, kd, d->ln2_lo);
  float64x2_t lo2 = vaddq_f64 (vsubq_f64 (t1, t2), r);
  /* Evaluation is optimized assuming superscalar pipelined execution. */
  float64x2_t ar = vmulq_f64 (v_f64 (-0.5), r);
  float64x2_t ar2 = vmulq_f64 (r, ar);
  float64x2_t ar3 = vmulq_f64 (r, ar2);
  /* k*Ln2 + log(c) + r + A[0]*r*r. */
  float64x2_t hi = vaddq_f64 (t2, ar2);
  float64x2_t lo3 = vfmaq_f64 (vnegq_f64 (ar2), ar, r);
  float64x2_t lo4 = vaddq_f64 (vsubq_f64 (t2, hi), ar2);
  /* p = log1p(r) - r - A[0]*r*r. */
  float64x2_t a56 = vfmaq_f64 (A (4), r, A (5));
  float64x2_t a34 = vfmaq_f64 (A (2), r, A (3));
  float64x2_t a12 = vfmaq_f64 (A (0), r, A (1));
  float64x2_t p = vfmaq_f64 (a34, ar2, a56);
  p = vfmaq_f64 (a12, ar2, p);
  p = vmulq_f64 (ar3, p);
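  /* With ar = -r/2, the powers above are ar2 = -r^2/2 and ar3 = -r^3/2, and
     each extra ar2 factor in the scheme contributes another -1/2.  The -2, 4
     and -8 scalings baked into log_poly cancel those factors exactly, e.g.
     the A(0) term evaluates as (-2*c0) * ar3 = (-2*c0) * (-r^3/2) = c0*r^3
     for the true coefficient c0.  */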
  float64x2_t lo
      = vaddq_f64 (vaddq_f64 (vaddq_f64 (vaddq_f64 (lo1, lo2), lo3), lo4), p);
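  /* Final Fast2Sum: with |hi| >= |lo|, y = hi + lo rounded plus
     tail = (hi - y) + lo represent the sum hi + lo with extra precision.  */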
  float64x2_t y = vaddq_f64 (hi, lo);
  *tail = vaddq_f64 (vsubq_f64 (hi, y), lo);
  return y;
}

static float64x2_t VPCS_ATTR NOINLINE
exp_special_case (float64x2_t x, float64x2_t xtail)
{
  return (float64x2_t){ exp_nosignbias (x[0], xtail[0]),
                        exp_nosignbias (x[1], xtail[1]) };
}

/* Computes exp(x+xtail) where |xtail| < 2^-8/N and |xtail| <= |x|. */
static inline float64x2_t
v_exp_inline (float64x2_t x, float64x2_t xtail, const struct data *d)
{
  /* Fall back to scalar exp_inline for all lanes if any lane contains a
     value of x such that |x| <= 2^-54 or |x| >= 512. */
  uint64x2_t abstop
      = vshrq_n_u64 (vandq_u64 (vreinterpretq_u64_f64 (x), d->inf), 52);
  uint64x2_t uoflowx
      = vcgeq_u64 (vsubq_u64 (abstop, VecSmallExp), VecThresExp);
  if (unlikely (v_any_u64 (uoflowx)))
    return exp_special_case (x, xtail);

  /* exp(x) = 2^(k/N) * exp(r), with exp(r) in [2^(-1/2N),2^(1/2N)]. */
  /* x = ln2/N*k + r, with k integer and r in [-ln2/2N, ln2/2N]. */
  float64x2_t z = vmulq_f64 (d->inv_ln2_n, x);
  /* z - kd is in [-1, 1] in non-nearest rounding modes. */
  float64x2_t kd = vaddq_f64 (z, d->shift);
  uint64x2_t ki = vreinterpretq_u64_f64 (kd);
  kd = vsubq_f64 (kd, d->shift);
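  /* The 0x1.8p52 shift moves z into a binade where the ULP is 1, so the
     addition above rounds z to the nearest integer k: the low significand
     bits of the sum (reinterpreted as ki) encode k, and subtracting the
     shift back recovers k as the double kd.  */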
  float64x2_t r = vfmsq_f64 (x, kd, d->ln2_hi_n);
  r = vfmsq_f64 (r, kd, d->ln2_lo_n);
  /* The code assumes 2^-200 < |xtail| < 2^-8/N. */
  r = vaddq_f64 (r, xtail);
  /* 2^(k/N) ~= scale. */
  uint64x2_t idx = vandq_u64 (ki, v_u64 (N_EXP - 1));
  uint64x2_t top = vshlq_n_u64 (ki, 52 - V_POW_EXP_TABLE_BITS);
  /* This is only a valid scale when -1023*N < k < 1024*N. */
  uint64x2_t sbits = v_lookup_u64 (SBits, idx);
  sbits = vaddq_u64 (sbits, top);
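  /* sbits now encodes scale = 2^(k/N): the table entry supplies the
     significand of 2^(idx/N), and adding top folds floor(k/N) into the
     exponent field.  */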
  /* exp(x) = 2^(k/N) * exp(r) ~= scale + scale * (exp(r) - 1). */
  float64x2_t r2 = vmulq_f64 (r, r);
  float64x2_t tmp = vfmaq_f64 (C (1), r, C (2));
  tmp = vfmaq_f64 (C (0), r, tmp);
  tmp = vfmaq_f64 (r, r2, tmp);
  float64x2_t scale = vreinterpretq_f64_u64 (sbits);
  /* Note: tmp == 0 or |tmp| > 2^-200 and scale > 2^-739, so there
     is no spurious underflow here even without fma. */
  return vfmaq_f64 (scale, scale, tmp);
}

static float64x2_t NOINLINE VPCS_ATTR
scalar_fallback (float64x2_t x, float64x2_t y)
{
  return (float64x2_t){ pow_scalar_special_case (x[0], y[0]),
                        pow_scalar_special_case (x[1], y[1]) };
}

float64x2_t VPCS_ATTR V_NAME_D2 (pow) (float64x2_t x, float64x2_t y)
{
  const struct data *d = ptr_barrier (&data);
  /* The case x <= 0 is too complicated to be vectorised efficiently here, so
     fall back to scalar pow for all lanes if any lane has x <= 0. */
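  /* vclezq_s64 on the raw bits is true for any lane with the sign bit set
     (negative x, -0.0, negative NaNs) and for +0.0 (all bits zero), which is
     exactly the x <= 0 set described above.  */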
  if (v_any_u64 (vclezq_s64 (vreinterpretq_s64_f64 (x))))
    return scalar_fallback (x, y);

  uint64x2_t vix = vreinterpretq_u64_f64 (x);
  uint64x2_t viy = vreinterpretq_u64_f64 (y);
  uint64x2_t iay = vandq_u64 (viy, d->inf);

  /* Special cases of x or y. */
#if WANT_SIMD_EXCEPT
  /* Small or large. */
  uint64x2_t vtopx = vshrq_n_u64 (vix, 52);
  uint64x2_t vabstopy = vshrq_n_u64 (iay, 52);
  uint64x2_t specialx
      = vcgeq_u64 (vsubq_u64 (vtopx, VecSmallPowX), VecThresPowX);
  uint64x2_t specialy
      = vcgeq_u64 (vsubq_u64 (vabstopy, VecSmallPowY), VecThresPowY);
#else
  /* The case y==0 does not trigger a special case, since in this case it is
     necessary to fix the result only if x is a signalling NaN, which already
     triggers a special case.  We test y==0 directly in the scalar fallback. */
  uint64x2_t iax = vandq_u64 (vix, d->inf);
  uint64x2_t specialx = vcgeq_u64 (iax, d->inf);
  uint64x2_t specialy = vcgeq_u64 (iay, d->inf);
#endif
  uint64x2_t special = vorrq_u64 (specialx, specialy);
  /* Fall back to scalar on all lanes if any lane is inf or NaN. */
  if (unlikely (v_any_u64 (special)))
    return scalar_fallback (x, y);

  /* Small cases of x: |x| < 0x1p-126. */
  uint64x2_t smallx = vcaltq_f64 (x, d->small_powx);
  if (unlikely (v_any_u64 (smallx)))
    {
      /* Update ix if top 12 bits of x are 0. */
      uint64x2_t sub_x = vceqzq_u64 (vshrq_n_u64 (vix, 52));
      if (unlikely (v_any_u64 (sub_x)))
        {
          /* Normalize subnormal x so exponent becomes negative. */
          uint64x2_t vix_norm = vreinterpretq_u64_f64 (
              vabsq_f64 (vmulq_f64 (x, vcvtq_f64_u64 (v_u64 (1ULL << 52)))));
          vix_norm = vsubq_u64 (vix_norm, v_u64 (52ULL << 52));
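          /* E.g. x = 0x1p-1040: multiplying by 0x1p52 gives the normal value
             0x1p-988, and subtracting 52 from the biased exponent recreates
             2^-1040 with the exponent field borrowing into the sign bit,
             which the arithmetic shift in v_log_inline handles.  */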
          vix = vbslq_u64 (sub_x, vix_norm, vix);
        }
    }

  /* Vector Log(ix, &lo). */
  float64x2_t vlo;
  float64x2_t vhi = v_log_inline (vix, &vlo, d);

  /* Vector Exp(y_loghi, y_loglo). */
  float64x2_t vehi = vmulq_f64 (y, vhi);
  float64x2_t velo = vmulq_f64 (y, vlo);
  float64x2_t vemi = vfmsq_f64 (vehi, y, vhi);
  velo = vsubq_f64 (velo, vemi);
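  /* vemi = vehi - y*vhi is minus the rounding error of vehi, so after the
     subtraction velo = y*vlo + (y*vhi - vehi), and vehi + velo approximates
     y * (vhi + vlo) to well beyond double precision.  */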
  return v_exp_inline (vehi, velo, d);
}
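
/* Example (illustrative): the vector-PCS symbol produced by V_NAME_D2 (pow)
   is _ZGVnN2vv_pow (as in the error report above), so a caller could write:

     float64x2_t r = _ZGVnN2vv_pow ((float64x2_t){ 2.0, 3.0 },
                                    (float64x2_t){ 10.0, 4.0 });

   giving approximately { 1024.0, 81.0 }, accurate to the ~1.04 ULP bound
   measured above.  */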

PL_SIG (V, D, 2, pow)
PL_TEST_ULP (V_NAME_D2 (pow), 0.55)
PL_TEST_EXPECT_FENV (V_NAME_D2 (pow), WANT_SIMD_EXCEPT)
/* Wide intervals spanning the whole domain but shared between x and y. */
#define V_POW_INTERVAL2(xlo, xhi, ylo, yhi, n)                                \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, ylo, yhi, n)                  \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), xlo, xhi, -ylo, -yhi, n)                \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, ylo, yhi, n)                \
  PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -xlo, -xhi, -ylo, -yhi, n)
#define EXPAND(str) str##000000000
#define SHL52(str) EXPAND (str)
V_POW_INTERVAL2 (0, SHL52 (SmallPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (SmallPowX), SHL52 (BigPowX), 0, inf, 40000)
V_POW_INTERVAL2 (SHL52 (BigPowX), inf, 0, inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, SHL52 (SmallPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (SmallPowY), SHL52 (BigPowY), 40000)
V_POW_INTERVAL2 (0, inf, SHL52 (BigPowY), inf, 40000)
V_POW_INTERVAL2 (0, inf, 0, inf, 1000)
/* x~1 or y~1. */
V_POW_INTERVAL2 (0x1p-1, 0x1p1, 0x1p-10, 0x1p10, 10000)
V_POW_INTERVAL2 (0x1p-500, 0x1p500, 0x1p-1, 0x1p1, 10000)
V_POW_INTERVAL2 (0x1.ep-1, 0x1.1p0, 0x1p8, 0x1p16, 10000)
/* Around the argmaxes of ULP error. */
V_POW_INTERVAL2 (0x1p-300, 0x1p-200, 0x1p-20, 0x1p-10, 10000)
V_POW_INTERVAL2 (0x1p50, 0x1p100, 0x1p-20, 0x1p-10, 10000)
/* x is negative; y is an odd or even integer, or a non-integer real. */
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 3.0, 3.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 4.0, 4.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), -0.0, -10.0, 0.0, 10.0, 10000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 0.0, 10.0, -0.0, -10.0, 10000)
/* 1.0^y. */
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0.0, 0x1p-50, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 0x1p-50, 1.0, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, 1.0, 0x1p100, 1000)
PL_TEST_INTERVAL2 (V_NAME_D2 (pow), 1.0, 1.0, -1.0, -0x1p120, 1000)