/*
 * Helper for single-precision routines which calculate exp(x) and do not
 * need special-case handling
 *
 * Copyright (c) 2019-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#ifndef PL_MATH_V_EXPF_INLINE_H
#define PL_MATH_V_EXPF_INLINE_H

#include "v_math.h"

struct v_expf_data
{
  float32x4_t poly[5];
  float32x4_t shift;
  float invln2_and_ln2[4];
};

/* maxerr: 1.45358 +0.5 ulp. */
#define V_EXPF_DATA                                                           \
  {                                                                           \
    .poly = { V4 (0x1.0e4020p-7f), V4 (0x1.573e2ep-5f), V4 (0x1.555e66p-3f), \
	      V4 (0x1.fffdb6p-2f), V4 (0x1.ffffecp-1f) },                     \
    .shift = V4 (0x1.8p23f),                                                  \
    .invln2_and_ln2 = { 0x1.715476p+0f, 0x1.62e4p-1f, 0x1.7f7d1cp-20f, 0 },   \
  }
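
/* Notes on the constants above: shift = 1.5 * 2^23 implements the usual
   round-to-nearest trick - adding it to x/ln2 forces rounding so that the
   low mantissa bits of the sum hold the nearest integer n.  invln2_and_ln2
   packs 1/ln2 together with ln2 split into high and low parts, so ln2 * n
   can be subtracted in two steps for extra precision in the reduced
   argument r.  */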

#define ExponentBias v_u32 (0x3f800000) /* asuint(1.0f). */
#define C(i) d->poly[i]

static inline float32x4_t
v_expf_inline (float32x4_t x, const struct v_expf_data *d)
{
  /* Helper routine for calculating exp(x).
     Copied from v_expf.c, with all special-case handling removed - the
     calling routine should handle special values if required.  */

  /* exp(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
  float32x4_t n, r, z;
  float32x4_t invln2_and_ln2 = vld1q_f32 (d->invln2_and_ln2);
  z = vfmaq_laneq_f32 (d->shift, x, invln2_and_ln2, 0);
  n = vsubq_f32 (z, d->shift);
  r = vfmsq_laneq_f32 (x, n, invln2_and_ln2, 1);
  r = vfmsq_laneq_f32 (r, n, invln2_and_ln2, 2);
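  /* After the shift trick above, the low mantissa bits of z hold the
     round-to-nearest integer n.  Shifting z left by 23 moves that integer
     into the exponent field; adding ExponentBias (the bit pattern of 1.0f)
     and reinterpreting then yields scale = 2^n as a float.  */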
  uint32x4_t e = vshlq_n_u32 (vreinterpretq_u32_f32 (z), 23);
  float32x4_t scale = vreinterpretq_f32_u32 (vaddq_u32 (e, ExponentBias));

  /* Custom order-4 Estrin avoids building high order monomial.  */
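  /* The scheme below evaluates
     poly = C4 r + C3 r^2 + C2 r^3 + C1 r^4 + C0 r^5,
     so that 1 + poly approximates exp(r); combined with scale = 2^n, the
     final FMA returns scale + poly * scale = 2^n (1 + poly) = exp(x).  */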
  float32x4_t r2 = vmulq_f32 (r, r);
  float32x4_t p, q, poly;
  p = vfmaq_f32 (C (1), C (0), r);
  q = vfmaq_f32 (C (3), C (2), r);
  q = vfmaq_f32 (q, p, r2);
  p = vmulq_f32 (C (4), r);
  poly = vfmaq_f32 (p, q, r2);
  return vfmaq_f32 (scale, poly, scale);
}
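
/* Example use (an illustrative sketch, not part of this header - exp_core
   is a hypothetical caller): once special cases have been screened out,
   e.g. by clamping x so that 2^n can neither overflow nor underflow, four
   exponentials can be computed at once:

     static const struct v_expf_data data = V_EXPF_DATA;

     static inline float32x4_t
     exp_core (float32x4_t x)
     {
       return v_expf_inline (x, &data);
     }
 */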

#endif // PL_MATH_V_EXPF_INLINE_H