// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert ELEMENTS_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/raddstoreexpminusmax.h>


// Note: redeclared as uint32_t[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
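// The table holds 2**(k / 64) for k = 0..63 as IEEE-754 single-precision bit patterns; all entries
// lie in [1.0, 2.0), i.e. their floating-point exponent is 0 (see the lookup comments below).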

void xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x${ELEMENTS_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(elements % sizeof(float) == 0);
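  // Note: elements is a byte count (a multiple of sizeof(float)), which is why the loops below
  // decrement it in units of sizeof(float).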

  const float vi_max = *max;
  const float vlog2e = params->scalar_rr2_lut64_p2.log2e;
  const float vmagic_bias = params->scalar_rr2_lut64_p2.magic_bias;
  const uint32_t vindex_mask = UINT32_C(0x3F);
  const float vminus_ln2_hi = params->scalar_rr2_lut64_p2.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut64_p2.minus_ln2_lo;
  const float vc2 = params->scalar_rr2_lut64_p2.c2;
  const float vdenorm_cutoff = params->scalar_rr2_lut64_p2.denorm_cutoff;

  $if ELEMENTS_TILE > 1:
    $for K in range(ACCUMULATORS):
      float vacc${K} = 0.0f;
    for (; elements >= ${ELEMENTS_TILE} * sizeof(float); elements -= ${ELEMENTS_TILE} * sizeof(float)) {
      // Load ${ELEMENTS_TILE} inputs at a time.
      $for N in range(ELEMENTS_TILE):
        const float vi${N} = input[${N}];
      input += ${ELEMENTS_TILE};

      // Subtract maximum input x := i - i_max. This implies x <= 0.
      $for N in range(ELEMENTS_TILE):
        const float vx${N} = vi${N} - vi_max;

      // Compute reduced argument n := round(x * 64 / log(2)).
      // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then subtracting
      // the large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
      // The trick with adding a large number is valid only within certain bounds (|x * 64 / log(2)| <= 2**22, i.e.
      // |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs outside of [-87.336540, 0.0]
      // result in denormalized or underflown expf(x). We fix up the result for such inputs at the very end of the
      // algorithm.
      $for N in range(ELEMENTS_TILE):
        float vn${N} = vx${N} * vlog2e + vmagic_bias;
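      // Worked example (assuming the usual 0x1.8p+23f = 12582912.0f magic bias for this trick):
      // for x = -1.0f, x * 64 / log(2) ~= -92.33; adding the bias gives ~12582819.67, which rounds to
      // 12582820.0f because the float spacing at that magnitude is 1.0. Subtracting the bias back below
      // recovers n = -92.0f, and the low 6 bits of the rounded value's bit pattern hold -92 mod 64 = 36,
      // ready to serve as the table index.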

      // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
      // i.e. -87.33642 <= x <= 0.0. As n / 64 has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where
      // e := floor(n / 64). We create s in two steps:
      // 1. Fetch 2**(n / 64 - e) = 2**((n % 64) / 64) from the table using the 6 low bits of n, as integer. Note that the
      //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
      // 2. Adjust the fetched value by addition of e to its floating-point exponent. The result is always a normalized
      //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
      //    and thus the adjusted exponent is not lower than -126.
      //
      // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
      $for N in range(ELEMENTS_TILE):
        const uint32_t ve${N} = (float_as_uint32(vn${N}) & UINT32_C(0xFFFFFFC0)) << 17;

      // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**((n % 64) / 64).
      $for N in range(ELEMENTS_TILE):
        const uint32_t vidx${N} = float_as_uint32(vn${N}) & vindex_mask;
      // Adjust exponent of the value l fetched from the table to get the final s value.
      $for N in range(ELEMENTS_TILE):
        const float vs${N} = uint32_as_float(xnn_table_exp2_k_over_64[vidx${N}] + ve${N});
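      // Continuing the example above: n = -92 gives e = floor(-92 / 64) = -2 and n mod 64 = 36, so
      // s = 2**-2 * 2**(36 / 64) = 2**(-92 / 64) ~= 0.3692. For negative e the shifted term ve wraps
      // modulo 2**32, which is exactly what makes the uint32 addition subtract |e| from the biased
      // exponent of the table entry.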

      // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
      $for N in range(ELEMENTS_TILE):
        vn${N} -= vmagic_bias;

      // Compute reduced argument t := x - n * log(2) / 64.
      // Use the Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
      $for N in range(ELEMENTS_TILE):
        float vt${N} = vn${N} * vminus_ln2_hi + vx${N};

      $for N in range(ELEMENTS_TILE):
        vt${N} = vn${N} * vminus_ln2_lo + vt${N};
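      // (Cody-Waite detail: minus_ln2_hi keeps only the leading bits of -log(2) / 64, leaving enough
      // trailing zeros for n * minus_ln2_hi to be exact; minus_ln2_lo then adds the truncated remainder.)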

      // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
      $for N in range(ELEMENTS_TILE):
        float vp${N} = vt${N} * vc2;

      $for N in range(ELEMENTS_TILE):
        vp${N} = vp${N} * vt${N} + vt${N};
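      // After these two steps p = t + c2 * t**2, i.e. an approximation of exp(t) - 1 on the reduced
      // interval; c2 is close to the Taylor coefficient 1/2.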

      // Reconstruct the final f value:
      //   f = s * (1 + t * (1 + t * c2))
      //     = s * (1 + t + t * (t * c2))
      //     = s + s * (t + t * (t * c2))
      //     = s + s * p
      $for N in range(ELEMENTS_TILE):
        float vf${N} = vp${N} * vs${N} + vs${N};

      // For inputs below the denormal cutoff, replace output with +0.0f.
      // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
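      // XNN_UNPREDICTABLE is a branch-prediction hint: the outcome is data-dependent, so code layout
      // should not assume either result is the common case.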
      $for N in range(ELEMENTS_TILE):
        if XNN_UNPREDICTABLE(vx${N} < vdenorm_cutoff) {
          vf${N} = 0.0f;
        }

      // Store ${ELEMENTS_TILE} outputs at a time.
      $for N in range(ELEMENTS_TILE):
        output[${N}] = vf${N};
      output += ${ELEMENTS_TILE};

      // Accumulate computed exponents.
      $for N in range(ELEMENTS_TILE):
        vacc${N % ACCUMULATORS} += vf${N};
    }
    $if ACCUMULATORS > 1:
      // Add up all accumulators to vacc0
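      // (The main loop spread the sum over ${ACCUMULATORS} accumulators to shorten the dependency
      // chain of the floating-point additions; fold them back into vacc0 here.)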
      $ACC_SLICE = 1
      $while ACC_SLICE < ACCUMULATORS:
        $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
          $if A + ACC_SLICE < ACCUMULATORS:
            vacc${A} += vacc${A + ACC_SLICE};
        $ACC_SLICE *= 2

    float vacc = vacc0;
  $else:
    float vacc = 0.0f;
  for (; elements >= sizeof(float); elements -= sizeof(float)) {
    // Load 1 input at a time.
    const float vi = *input++;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, then subtracting
    // the large number back. The first addition is combined with multiplication by log2e into a single FMA instruction.
    // The trick with adding a large number is valid only within certain bounds (|x * 64 / log(2)| <= 2**22, i.e.
    // |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs outside of [-87.336540, 0.0]
    // result in denormalized or underflown expf(x). We fix up the result for such inputs at the very end of the
    // algorithm.
    float vn = vx * vlog2e + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
    // i.e. -87.33642 <= x <= 0.0. As n / 64 has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where
    // e := floor(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**((n % 64) / 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const uint32_t ve = (float_as_uint32(vn) & UINT32_C(0xFFFFFFC0)) << 17;

    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**((n % 64) / 64).
    const uint32_t vidx = float_as_uint32(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = uint32_as_float(xnn_table_exp2_k_over_64[vidx] + ve);

    // Subtract the large number back to get final n := round(x * 64 / log(2)) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use the Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt = vn * vminus_ln2_hi + vx;
    vt = vn * vminus_ln2_lo + vt;

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp = vt * vc2;
    vp = vp * vt + vt;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf = vp * vs + vs;

    // For inputs below the denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, the comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }

    // Store 1 output at a time.
    *output++ = vf;

    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}