/*
 * Single-precision vector 2^x function.
 *
 * Copyright (c) 2019-2023, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "mathlib.h"
#include "v_math.h"

static const struct data
{
  float32x4_t poly[5];
  uint32x4_t exponent_bias;
#if !WANT_SIMD_EXCEPT
  float32x4_t special_bound, scale_thresh;
#endif
} data = {
  /* maxerr: 1.962 ulp. */
  .poly = { V4 (0x1.59977ap-10f), V4 (0x1.3ce9e4p-7f), V4 (0x1.c6bd32p-5f),
            V4 (0x1.ebf9bcp-3f), V4 (0x1.62e422p-1f) },
  .exponent_bias = V4 (0x3f800000),
#if !WANT_SIMD_EXCEPT
  .special_bound = V4 (126.0f),
  .scale_thresh = V4 (192.0f),
#endif
};
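
/* poly[] holds the coefficients c0..c4 of a degree-5 polynomial with zero
   constant term, poly (r) = c4 r + c3 r^2 + c2 r^3 + c1 r^4 + c0 r^5,
   approximating 2^r - 1 on [-1/2, 1/2]. The coefficients are close to the
   Taylor series of 2^r - 1: c4 ~= ln 2, c3 ~= (ln 2)^2 / 2, and so on. */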

#define C(i) d->poly[i]

#if WANT_SIMD_EXCEPT

# define TinyBound v_u32 (0x20000000) /* asuint (0x1p-63). */
# define BigBound v_u32 (0x42800000) /* asuint (0x1p6). */
# define SpecialBound v_u32 (0x22800000) /* BigBound - TinyBound. */
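
/* A single unsigned comparison classifies every special lane:
   asuint (|x|) - TinyBound wraps around for |x| < 0x1p-63, and is
   >= SpecialBound for |x| >= 64 as well as for NaN and infinity. Tiny lanes
   must also take the scalar fallback: squaring r = x for nonzero tiny x
   would otherwise raise a spurious underflow exception. */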

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t x, float32x4_t y, uint32x4_t cmp)
{
  /* If fenv exceptions are to be triggered correctly, fall back to the scalar
     routine for special lanes. */
  return v_call_f32 (exp2f, x, y, cmp);
}

#else

# define SpecialOffset v_u32 (0x82000000)
# define SpecialBias v_u32 (0x7f000000)
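
/* SpecialBias is the bit pattern of 2^127. Adding SpecialOffset to it with
   u32 wraparound (0x82000000 + 0x7f000000 = 0x01000000) gives the bit
   pattern of 2^-125. So s1 below is 2^127 for n > 0 and 2^-125 for n <= 0,
   and s2 = 2^n / s1 always has a representable exponent. E.g. for n = -130,
   s1 = 2^-125 and s2 = 2^-5, so s1 * (s2 + poly * s2) reaches 2^-130 without
   intermediate results flushing to zero. */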

static float32x4_t VPCS_ATTR NOINLINE
special_case (float32x4_t poly, float32x4_t n, uint32x4_t e, uint32x4_t cmp1,
              float32x4_t scale, const struct data *d)
{
  /* 2^n may overflow, break it up into s1*s2. */
  uint32x4_t b = vandq_u32 (vclezq_f32 (n), SpecialOffset);
  float32x4_t s1 = vreinterpretq_f32_u32 (vaddq_u32 (b, SpecialBias));
  float32x4_t s2 = vreinterpretq_f32_u32 (vsubq_u32 (e, b));
  /* For |n| > 192 the result saturates regardless of the polynomial, so
     s1*s1 is returned directly: 2^127 * 2^127 -> inf, 2^-125 * 2^-125 -> 0. */
  uint32x4_t cmp2 = vcagtq_f32 (n, d->scale_thresh);
  float32x4_t r2 = vmulq_f32 (s1, s1);
  float32x4_t r1 = vmulq_f32 (vfmaq_f32 (s2, poly, s2), s1);
  /* Similar to r1 but avoids double rounding in the subnormal range. */
  float32x4_t r0 = vfmaq_f32 (scale, poly, scale);
  float32x4_t r = vbslq_f32 (cmp1, r1, r0);
  return vbslq_f32 (cmp2, r2, r);
}

#endif

float32x4_t VPCS_ATTR V_NAME_F1 (exp2) (float32x4_t x)
{
  const struct data *d = ptr_barrier (&data);
  float32x4_t n, r, r2, scale, p, q, poly;
  uint32x4_t cmp, e;

#if WANT_SIMD_EXCEPT
  /* asuint (|x|) - TinyBound >= BigBound - TinyBound. */
  uint32x4_t ia = vreinterpretq_u32_f32 (vabsq_f32 (x));
  cmp = vcgeq_u32 (vsubq_u32 (ia, TinyBound), SpecialBound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 (a harmless input that raises
     no exceptions) and retain a copy of x to allow special_case to fix the
     special lanes later. This is only necessary if fenv exceptions are to be
     triggered correctly. */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  /* exp2(x) = 2^n (1 + poly(r)), with 1 + poly(r) in [1/sqrt(2),sqrt(2)]
     and x = n + r, with r in [-1/2, 1/2]. */
  n = vrndaq_f32 (x);
  r = vsubq_f32 (x, n);
  e = vshlq_n_u32 (vreinterpretq_u32_s32 (vcvtaq_s32_f32 (x)), 23);
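  /* e holds n << 23, i.e. n placed in the exponent field; adding
     exponent_bias (0x3f800000, the bit pattern of 1.0f) turns it into the
     float 2^n. E.g. n = 3: (3 << 23) + 0x3f800000 = 0x41000000 = 8.0f. */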
  scale = vreinterpretq_f32_u32 (vaddq_u32 (e, d->exponent_bias));

#if !WANT_SIMD_EXCEPT
  /* |n| > 126 means 2^n may overflow, or the result may fall in the
     subnormal range; defer such lanes to special_case. */
  cmp = vcagtq_f32 (n, d->special_bound);
#endif

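  /* Evaluate poly in Estrin-like pairs to shorten the dependency chain:
     poly = c4 r + r^2 (c3 + c2 r) + r^4 (c1 + c0 r). */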
  r2 = vmulq_f32 (r, r);
  p = vfmaq_f32 (C (1), C (0), r);
  q = vfmaq_f32 (C (3), C (2), r);
  q = vfmaq_f32 (q, p, r2);
  p = vmulq_f32 (C (4), r);
  poly = vfmaq_f32 (p, q, r2);

  if (unlikely (v_any_u32 (cmp)))
#if WANT_SIMD_EXCEPT
    return special_case (xm, vfmaq_f32 (scale, poly, scale), cmp);
#else
    return special_case (poly, n, e, cmp, scale, d);
#endif

  return vfmaq_f32 (scale, poly, scale);
}
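
/* Example usage (a sketch, not part of the routine; the exported symbol name
   depends on how V_NAME_F1 is defined in v_math.h -- for the AdvSIMD vector
   ABI it is typically _ZGVnN4v_exp2f):

     float32x4_t x = { -1.0f, 0.0f, 0.5f, 10.0f };
     float32x4_t y = _ZGVnN4v_exp2f (x);  // ~{ 0.5, 1.0, 1.41421, 1024.0 }
*/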