xref: /aosp_15_r20/external/arm-optimized-routines/math/aarch64/v_expf_1u.c (revision 412f47f9e737e10ed5cc46ec6a8d7fa2264f8a14)
1*412f47f9SXin Li /*
2*412f47f9SXin Li  * Single-precision vector e^x function.
3*412f47f9SXin Li  *
4*412f47f9SXin Li  * Copyright (c) 2019-2023, Arm Limited.
5*412f47f9SXin Li  * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
6*412f47f9SXin Li  */
7*412f47f9SXin Li 
8*412f47f9SXin Li #include "mathlib.h"
9*412f47f9SXin Li #include "v_math.h"
10*412f47f9SXin Li 
/* Coefficients of a degree-6 polynomial (implicit leading terms are added in
   the Horner evaluation below) approximating e^r for r in [-ln2/2, ln2/2].  */
static const float Poly[] = {
  /*  maxerr: 0.36565 +0.5 ulp.  */
  0x1.6a6000p-10f,
  0x1.12718ep-7f,
  0x1.555af0p-5f,
  0x1.555430p-3f,
  0x1.fffff4p-2f,
};
#define C0 v_f32 (Poly[0])
#define C1 v_f32 (Poly[1])
#define C2 v_f32 (Poly[2])
#define C3 v_f32 (Poly[3])
#define C4 v_f32 (Poly[4])

/* 1.5*2^23: adding this to a float in [-2^22, 2^22] forces its rounded
   integer value into the low mantissa bits (round-to-nearest trick).  */
#define Shift v_f32 (0x1.8p23f)
/* 1/ln2, used to compute n = round(x/ln2).  */
#define InvLn2 v_f32 (0x1.715476p+0f)
/* ln2 split into a high part (short mantissa, so n*Ln2hi is exact) and a
   low correction part, for accurate reduction r = x - n*ln2.  */
#define Ln2hi v_f32 (0x1.62e4p-1f)
#define Ln2lo v_f32 (0x1.7f7d1cp-20f)
29*412f47f9SXin Li 
30*412f47f9SXin Li static float32x4_t VPCS_ATTR NOINLINE
specialcase(float32x4_t poly,float32x4_t n,uint32x4_t e,float32x4_t absn)31*412f47f9SXin Li specialcase (float32x4_t poly, float32x4_t n, uint32x4_t e, float32x4_t absn)
32*412f47f9SXin Li {
33*412f47f9SXin Li   /* 2^n may overflow, break it up into s1*s2.  */
34*412f47f9SXin Li   uint32x4_t b = (n <= v_f32 (0.0f)) & v_u32 (0x83000000);
35*412f47f9SXin Li   float32x4_t s1 = vreinterpretq_f32_u32 (v_u32 (0x7f000000) + b);
36*412f47f9SXin Li   float32x4_t s2 = vreinterpretq_f32_u32 (e - b);
37*412f47f9SXin Li   uint32x4_t cmp = absn > v_f32 (192.0f);
38*412f47f9SXin Li   float32x4_t r1 = s1 * s1;
39*412f47f9SXin Li   float32x4_t r0 = poly * s1 * s2;
40*412f47f9SXin Li   return vreinterpretq_f32_u32 ((cmp & vreinterpretq_u32_f32 (r1))
41*412f47f9SXin Li 				| (~cmp & vreinterpretq_u32_f32 (r0)));
42*412f47f9SXin Li }
43*412f47f9SXin Li 
44*412f47f9SXin Li float32x4_t VPCS_ATTR
_ZGVnN4v_expf_1u(float32x4_t x)45*412f47f9SXin Li _ZGVnN4v_expf_1u (float32x4_t x)
46*412f47f9SXin Li {
47*412f47f9SXin Li   float32x4_t n, r, scale, poly, absn, z;
48*412f47f9SXin Li   uint32x4_t cmp, e;
49*412f47f9SXin Li 
50*412f47f9SXin Li   /* exp(x) = 2^n * poly(r), with poly(r) in [1/sqrt(2),sqrt(2)]
51*412f47f9SXin Li      x = ln2*n + r, with r in [-ln2/2, ln2/2].  */
52*412f47f9SXin Li #if 1
53*412f47f9SXin Li   z = vfmaq_f32 (Shift, x, InvLn2);
54*412f47f9SXin Li   n = z - Shift;
55*412f47f9SXin Li   r = vfmaq_f32 (x, n, -Ln2hi);
56*412f47f9SXin Li   r = vfmaq_f32 (r, n, -Ln2lo);
57*412f47f9SXin Li   e = vreinterpretq_u32_f32 (z) << 23;
58*412f47f9SXin Li #else
59*412f47f9SXin Li   z = x * InvLn2;
60*412f47f9SXin Li   n = vrndaq_f32 (z);
61*412f47f9SXin Li   r = vfmaq_f32 (x, n, -Ln2hi);
62*412f47f9SXin Li   r = vfmaq_f32 (r, n, -Ln2lo);
63*412f47f9SXin Li   e = vreinterpretq_u32_s32 (vcvtaq_s32_f32 (z)) << 23;
64*412f47f9SXin Li #endif
65*412f47f9SXin Li   scale = vreinterpretq_f32_u32 (e + v_u32 (0x3f800000));
66*412f47f9SXin Li   absn = vabsq_f32 (n);
67*412f47f9SXin Li   cmp = absn > v_f32 (126.0f);
68*412f47f9SXin Li   poly = vfmaq_f32 (C1, C0, r);
69*412f47f9SXin Li   poly = vfmaq_f32 (C2, poly, r);
70*412f47f9SXin Li   poly = vfmaq_f32 (C3, poly, r);
71*412f47f9SXin Li   poly = vfmaq_f32 (C4, poly, r);
72*412f47f9SXin Li   poly = vfmaq_f32 (v_f32 (1.0f), poly, r);
73*412f47f9SXin Li   poly = vfmaq_f32 (v_f32 (1.0f), poly, r);
74*412f47f9SXin Li   if (unlikely (v_any_u32 (cmp)))
75*412f47f9SXin Li     return specialcase (poly, n, e, absn);
76*412f47f9SXin Li   return scale * poly;
77*412f47f9SXin Li }
78