/*
 * Single-precision vector erf(x) function.
 *
 * Copyright (c) 2023-2024, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "v_math.h"
#include "pl_sig.h"
#include "pl_test.h"

static const struct data
{
  float32x4_t max, shift, third;
#if WANT_SIMD_EXCEPT
  float32x4_t tiny_bound, scale_minus_one;
#endif
} data = {
  .max = V4 (3.9375), /* 4 - 8/128.  */
  .shift = V4 (0x1p16f),
  .third = V4 (0x1.555556p-2f), /* 1/3.  */
#if WANT_SIMD_EXCEPT
  .tiny_bound = V4 (0x1p-62f),
  .scale_minus_one = V4 (0x1.06eba8p-3f), /* scale - 1.0.  */
#endif
};
27
28 #define AbsMask 0x7fffffff
29
30 struct entry
31 {
32 float32x4_t erf;
33 float32x4_t scale;
34 };
35
36 static inline struct entry
lookup(uint32x4_t i)37 lookup (uint32x4_t i)
{
  struct entry e;
  float32x2_t t0 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 0)].erf);
  float32x2_t t1 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 1)].erf);
  float32x2_t t2 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 2)].erf);
  float32x2_t t3 = vld1_f32 (&__erff_data.tab[vgetq_lane_u32 (i, 3)].erf);
  float32x4_t e1 = vcombine_f32 (t0, t1);
  float32x4_t e2 = vcombine_f32 (t2, t3);
  e.erf = vuzp1q_f32 (e1, e2);
  e.scale = vuzp2q_f32 (e1, e2);
  return e;
}
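
/* Each vld1_f32 above loads one adjacent { erf, scale } pair from the
   table, and vuzp1q/vuzp2q de-interleave the four pairs into separate
   vectors.  A scalar sketch of the same lookup, for a hypothetical lane
   index j:

     e.erf[j] = __erff_data.tab[i[j]].erf;
     e.scale[j] = __erff_data.tab[i[j]].scale;  */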

/* Single-precision implementation of vector erf(x).
   Approximation based on series expansion near x rounded to
   nearest multiple of 1/128.
   Let d = x - r, and scale = 2 / sqrt(pi) * exp(-r^2). For x near r,

   erf(x) ~ erf(r) + scale * d * [1 - r * d - 1/3 * d^2]

   Values of erf(r) and scale(r) are read from lookup tables.
   For |x| > 3.9375, erf(|x|) rounds to 1.0f.

   Maximum error: 1.93 ULP
     _ZGVnN4v_erff(0x1.c373e6p-9) got 0x1.fd686cp-9
				  want 0x1.fd6868p-9.  */
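
/* A minimal scalar sketch of the same scheme (illustrative only; assumes
   round-to-nearest and |x| <= 3.9375 so that i stays within the table):

     int i = (int) nearbyintf (128.0f * fabsf (x));
     float r = (float) i / 128.0f;
     float d = fabsf (x) - r;
     float y = __erff_data.tab[i].erf
	       + __erff_data.tab[i].scale * d * (1.0f - r * d - d * d / 3.0f);
     return copysignf (y, x);  */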
float32x4_t VPCS_ATTR V_NAME_F1 (erf) (float32x4_t x)
{
  const struct data *dat = ptr_barrier (&data);

#if WANT_SIMD_EXCEPT
  /* |x| < 2^-62.  */
  uint32x4_t cmp = vcaltq_f32 (x, dat->tiny_bound);
  float32x4_t xm = x;
  /* If any lanes are special, mask them with 1 and retain a copy of x to
     allow the special-case handler to fix those lanes later.  This is only
     necessary if fenv exceptions are to be triggered correctly.  */
  if (unlikely (v_any_u32 (cmp)))
    x = vbslq_f32 (cmp, v_f32 (1), x);
#endif

  float32x4_t a = vabsq_f32 (x);
  uint32x4_t a_gt_max = vcgtq_f32 (a, dat->max);

  /* Look up erf(r) and scale(r) in the tables: e.g. when x reduces to
     r = 0, erf(r) is 0 and scale is 2/sqrt(pi).  */
  float32x4_t shift = dat->shift;
  float32x4_t z = vaddq_f32 (a, shift);

  uint32x4_t i
      = vsubq_u32 (vreinterpretq_u32_f32 (z), vreinterpretq_u32_f32 (shift));
  i = vminq_u32 (i, v_u32 (512));
  struct entry e = lookup (i);

  float32x4_t r = vsubq_f32 (z, shift);
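
  /* The shift-based reduction works because 0x1p16f has a ULP of exactly
     1/128: adding it rounds a to the nearest multiple of 1/128, and the
     bit-pattern subtraction recovers the index directly.  A scalar sketch,
     where asuint is a hypothetical float-to-bits helper:

       i = asuint (a + 0x1p16f) - asuint (0x1p16f);   // i = round (128 * a)
       r = (a + 0x1p16f) - 0x1p16f;		      // r = i / 128.  */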

  /* erf(x) ~ erf(r) + scale * d * (1 - r * d - 1/3 * d^2).  */
  float32x4_t d = vsubq_f32 (a, r);
  float32x4_t d2 = vmulq_f32 (d, d);
  float32x4_t y = vfmaq_f32 (r, dat->third, d);
  y = vfmaq_f32 (e.erf, e.scale, vfmsq_f32 (d, d2, y));
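
  /* The expansion d * (1 - r * d - 1/3 * d^2) is factored as
     d - d2 * (r + d/3), so the whole polynomial maps onto fused ops:
       y = r + d/3;  y = d - d2 * y;  y = erf(r) + scale * y.  */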

  /* For |x| > 3.9375, erf(|x|) rounds to 1.0f.  This select also fixes the
     |x| = inf lanes, where d above is inf - inf = NaN.  */
  y = vbslq_f32 (a_gt_max, v_f32 (1.0f), y);

  /* Copy sign.  */
  y = vbslq_f32 (v_u32 (AbsMask), y, x);

#if WANT_SIMD_EXCEPT
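  /* For |x| < 2^-62, erf(x) ~ (2/sqrt(pi)) * x.  The fixup below computes
     this as x + x * (scale - 1), with scale = 2/sqrt(pi), which keeps the
     tiny lanes correctly rounded and raises the expected underflow/inexact
     exceptions.  */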
  if (unlikely (v_any_u32 (cmp)))
    return vbslq_f32 (cmp, vfmaq_f32 (xm, dat->scale_minus_one, xm), y);
#endif
  return y;
}
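
/* Usage sketch (hypothetical caller, not part of this file).  Under the
   AdvSIMD vector ABI, V_NAME_F1 (erf) expands to the _ZGVnN4v_erff symbol
   quoted in the error report above:

     float32x4_t x = { -1.0f, 0.5f, 2.0f, 10.0f };
     float32x4_t y = _ZGVnN4v_erff (x);  */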

PL_SIG (V, F, 1, erf, -4.0, 4.0)
PL_TEST_ULP (V_NAME_F1 (erf), 1.43)
PL_TEST_EXPECT_FENV (V_NAME_F1 (erf), WANT_SIMD_EXCEPT)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, 3.9375, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 3.9375, inf, 40000)
PL_TEST_SYM_INTERVAL (V_NAME_F1 (erf), 0, inf, 40000)