// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>

#include <arm_neon.h>

#include <xnnpack/math-stubs.h>


void xnn_math_f16_sigmoid__neonfp16arith_rr1_p2_div(
    size_t n,
    const void* input,
    void* output)
{
  assert(n % (8 * sizeof(__fp16)) == 0);

  // Large number such that ulp(magic bias) == 1 and magic bias === 15 mod 2**9.
  const float16x8_t vmagic_bias = vmovq_n_f16(0x1.83Cp+10f);
  const float16x8_t vminus_log2e = vmovq_n_f16(-0x1.714p+0f);
  const float16x8_t vln2 = vmovq_n_f16(0x1.630p-1f);
  // Coefficients of the polynomial approximation
  //   exp(-t) ~ 1 + t * (c1 + t * c2)
  // on [-log(2)/2, log(2)/2].
  const float16x8_t vc2 = vmovq_n_f16(0x1.FE4p-2f);
  const float16x8_t vc1 = vmovq_n_f16(-0x1.038p+0f);
  const float16x8_t vone = vmovq_n_f16(1.0f);
  // The largest z for which sigmoidh(-z) is normalized.
  // This number is also the largest z for which exph(-z) is normalized.
  const float16x8_t vdenorm_cutoff = vmovq_n_f16(-0x1.368p+3f);
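  // For reference, the hex-float constants above in decimal (as rounded to fp16):
  //   magic bias = 1551.0, -log2(e) ~ -1.4424, log(2) ~ 0.69336,
  //   c2 ~ 0.49829, c1 ~ -1.0137, denorm cutoff = -9.703125.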

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
  for (; n != 0; n -= 8 * sizeof(__fp16)) {
    const float16x8_t vx = vld1q_f16(i); i += 8;

    // General structure of the algorithm:
    //
    //          / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //          \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace the result with 1 - f[-z] if x >= 0.
    const float16x8_t vz = vabsq_f16(vx);

    // Compute reduced argument n := round(-z / log(2)).
    // We do it by adding a large number (magic bias) to the product z * (-1/log(2)), which causes rounding of
    // the result to an integer, then subtracting the large number back. The first addition is combined with the
    // multiplication by -log2e into a single FMA instruction. The trick with adding a large number is valid only
    // within certain bounds (|-z / log(2)| <= 2**9, i.e. |z| <= 0x1.630p+8 = 355.0), but that is acceptable,
    // because inputs outside of [-9.703125, 8.3125] (i.e. z outside [0, 9.703125]) underflow or saturate
    // sigmoidh(x). We fix up the result for such inputs at the very end of the algorithm.
    float16x8_t vn = vfmaq_f16(vmagic_bias, vz, vminus_log2e);
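    // Worked example (illustrative, not from the original comments): for z = 3.0,
    // z * (-1/log(2)) ~ -4.328, and 1551.0 - 4.328 rounds to 1547.0 in fp16, because
    // ulp == 1 on [1024, 2048). Subtracting the bias below recovers n = -4.0 = round(-4.328).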

    // Create a floating-point number s (scale) such that s == 2**n for inputs which don't cause underflow, i.e.
    // -9.703125 <= -z <= 0.0, and -14 <= n <= 0 accordingly.
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
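    // Expanded explanation (not in the original comments): vn == 1551 + n with -14 <= n <= 0,
    // so its fp16 encoding has exponent field 25 and mantissa field 527 + n. Shifting the
    // 16-bit word left by 10 moves the low mantissa bits (equal to 15 + n in this range) into
    // the exponent field and zeroes the mantissa, producing exactly 2**n.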

    // Subtract the large number back to get the final n := round(-z / log(2)) as a floating-point number.
    vn = vsubq_f16(vn, vmagic_bias);

    // Compute reduced argument t := z + n * log(2). Note that -t = -z - n * log(2).
    float16x8_t vt = vfmaq_f16(vz, vn, vln2);
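    // Continuing the z = 3.0 example: t = 3.0 + (-4.0) * 0.69336 ~ 0.2266, which lies
    // inside the reduction interval [-log(2)/2, log(2)/2].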

    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/2, log(2)/2]:
    //   P(t) = 1 + t * (c1 + t * c2) = 1 + t * p
    float16x8_t vp = vfmaq_f16(vc1, vc2, vt);

    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (c1 + t * c2))
    //     = s * (1 + t * p)
    //     = s + (t * s) * p
    vt = vmulq_f16(vt, vs);
    float16x8_t ve = vfmaq_f16(vs, vp, vt);
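    // Added remark: the s + (t * s) * p form keeps the leading term s exact and finishes the
    // reconstruction with a single fused multiply-add, rather than rounding 1 + t * p first
    // and multiplying by s afterwards.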

    // Denominator of the sigmoid fraction: 1.0 + exp(-z)
    float16x8_t vd = vaddq_f16(ve, vone);

    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    float16x8_t vf = vdivq_f16(ve, vd);

    // For inputs below the denormal cutoff, replace the output with +0.0f.
    // Note that for NaN inputs the comparison result is false, and the outputs are left unchanged.
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vcagtq_f16(vx, vdenorm_cutoff)));
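    // Added note: vcagtq_f16 compares absolute values, so lanes with x > 9.703125 are zeroed
    // here as well; the sign-based blend below turns those lanes into 1.0 - 0.0 = 1.0, the
    // saturated sigmoid value for large positive inputs.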

    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    const uint16x8_t vm = vcltq_f16(vx, vmovq_n_f16(0.0f));
    vf = vbslq_f16(vm, vf, vsubq_f16(vone, vf));

    vst1q_f16(o, vf); o += 8;
  }
}
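
// Illustration only (not part of the upstream kernel): a scalar sketch of the same algorithm
// in single precision. It mirrors the reduction and reconstruction steps above, but uses
// rintf/ldexpf in place of the magic-bias and bit-shift tricks, and float arithmetic in place
// of fp16 operations, so it only approximates the vector kernel's results. To try it, add
// #include <math.h> and uncomment:
//
//   static float sigmoid_scalar_sketch(float x) {
//     const float z = fabsf(x);                        // z = |x|
//     const float n = rintf(z * -0x1.714p+0f);         // n = round(-z / log(2))
//     const float s = ldexpf(1.0f, (int) n);           // s = 2**n
//     const float t = z + n * 0x1.630p-1f;             // t = z + n * log(2)
//     const float p = -0x1.038p+0f + t * 0x1.FE4p-2f;  // p = c1 + t * c2
//     const float e = s + (t * s) * p;                 // e ~ exp(-z)
//     float f = e / (e + 1.0f);                        // f = sigmoid(-z)
//     if (z > 0x1.368p+3f) f = 0.0f;                   // flush underflowing z to +0.0f
//     return x < 0.0f ? f : 1.0f - f;                  // sigmoid(x) via the sign of x
//   }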