// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$assert DIV_ALGO in ["div", "nr2fma", "nr2recps", "nr1recps1fma"]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$VMULADDQ_F32 = "vfmaq_f32" if FMA else "vmlaq_f32"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


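// 2048-entry lookup table used by the lut2048 kernels below to reconstruct 2**n on a 1/2048 grid.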
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_2048[2048];

$PARAMS_STRUCT = "neonfma_rr1_lut2048_p1" if FMA else "neon_rr2_lut2048_p1"
void xnn_f32_vsigmoid_ukernel__${"neonfma" if FMA else "neon"}_rr${1 if FMA else 2}_lut2048_p1_${DIV_ALGO}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->${PARAMS_STRUCT}.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->${PARAMS_STRUCT}.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x7FF));
  $if FMA:
    const float32x4_t vln2 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2);
  $else:
    const float32x4_t vln2_hi = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2_hi);
    const float32x4_t vln2_lo = vld1q_dup_f32(&params->${PARAMS_STRUCT}.ln2_lo);
  const float32x4_t vc1 = vld1q_dup_f32(&params->${PARAMS_STRUCT}.c1);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->${PARAMS_STRUCT}.denorm_cutoff);

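  // The kernel evaluates f := sigmoid(-z) = exp(-z) / (exp(-z) + 1) on z := |x|, then uses the identity
  // sigmoid(x) = 1 - sigmoid(-x) to recover the result for non-negative inputs. exp(-z) is computed from the
  // 2048-entry lookup table plus a degree-1 polynomial correction.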
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vx${ABC[N:N+4]} = vld1q_f32(x); x += 4;

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vz${ABC[N:N+4]} = vabsq_f32(vx${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        float32x4_t vn${ABC[N:N+4]} = ${VMULADDQ_F32}(vmagic_bias, vz${ABC[N:N+4]}, vminus_log2e);

      $for N in range(0, BATCH_TILE, 4):
        const int32x4_t ve${ABC[N:N+4]} = vshlq_n_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), 12);

      $for N in range(0, BATCH_TILE, 4):
        const uint64x2_t vidx${ABC[N:N+4]} = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn${ABC[N:N+4]}), vindex_mask));

      $for N in range(0, BATCH_TILE, 4):
        const uint64_t vidx${ABC[N:N+2]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 0);
        const uint64_t vidx${ABC[N+2:N+4]} = vgetq_lane_u64(vidx${ABC[N:N+4]}, 1);
        float32x2_t vl${ABC[N:N+2]} = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx${ABC[N:N+2]}]);
        float32x2_t vl${ABC[N+2:N+4]} = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx${ABC[N+2:N+4]}]);

      $for N in range(0, BATCH_TILE, 4):
        vl${ABC[N:N+2]} = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx${ABC[N:N+2]} >> 32)], vl${ABC[N:N+2]}, 1);
        vl${ABC[N+2:N+4]} = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx${ABC[N+2:N+4]} >> 32)], vl${ABC[N+2:N+4]}, 1);
        const float32x4_t vl${ABC[N:N+4]} = vcombine_f32(vl${ABC[N:N+2]}, vl${ABC[N+2:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vs${ABC[N:N+4]} = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl${ABC[N:N+4]}), ve${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = vsubq_f32(vn${ABC[N:N+4]}, vmagic_bias);

      $if FMA:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2);
      $else:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vz${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2_hi);

        $for N in range(0, BATCH_TILE, 4):
          vt${ABC[N:N+4]} = ${VMULADDQ_F32}(vt${ABC[N:N+4]}, vn${ABC[N:N+4]}, vln2_lo);

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vp${ABC[N:N+4]} = vmulq_f32(vt${ABC[N:N+4]}, vc1);

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vy${ABC[N:N+4]} = ${VMULADDQ_F32}(vs${ABC[N:N+4]}, vs${ABC[N:N+4]}, vp${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const float32x4_t vd${ABC[N:N+4]} = vaddq_f32(vy${ABC[N:N+4]}, vone);

      $if DIV_ALGO == "div":
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vf${ABC[N:N+4]} = vdivq_f32(vy${ABC[N:N+4]}, vd${ABC[N:N+4]});
      $else:
        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vr${ABC[N:N+4]} = vrecpeq_f32(vd${ABC[N:N+4]});

        $if DIV_ALGO == "nr2fma":
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vfmaq_f32(vr${ABC[N:N+4]}, vr${ABC[N:N+4]}, vfmsq_f32(vone, vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));
        $else:
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vmulq_f32(vr${ABC[N:N+4]}, vrecpsq_f32(vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));

        $if DIV_ALGO == "nr2recps":
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vmulq_f32(vr${ABC[N:N+4]}, vrecpsq_f32(vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));
        $else:
          $for N in range(0, BATCH_TILE, 4):
            vr${ABC[N:N+4]} = vfmaq_f32(vr${ABC[N:N+4]}, vr${ABC[N:N+4]}, vfmsq_f32(vone, vr${ABC[N:N+4]}, vd${ABC[N:N+4]}));

        $for N in range(0, BATCH_TILE, 4):
          float32x4_t vf${ABC[N:N+4]} = vmulq_f32(vy${ABC[N:N+4]}, vr${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf${ABC[N:N+4]}), vcagtq_f32(vx${ABC[N:N+4]}, vdenorm_cutoff)));

      $for N in range(0, BATCH_TILE, 4):
        const uint32x4_t vm${ABC[N:N+4]} = vcltq_f32(vx${ABC[N:N+4]}, vmovq_n_f32(0.0f));

      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = vbslq_f32(vm${ABC[N:N+4]}, vf${ABC[N:N+4]}, vsubq_f32(vone, vf${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vst1q_f32(y, vf${ABC[N:N+4]}); y += 4;
    }
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

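    // n := round(-z / log(2), 11): multiplying by -log2(e) and adding the large magic bias rounds the result
    // to the nearest multiple of 1/2048 and leaves it in the low mantissa bits of vn.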
    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vminus_log2e);
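    // Shift the bits of n left by 12 so that its integer part lands in the floating-point exponent field;
    // adding this to the table value below reconstructs s := 2**n.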
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);

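    // The low 11 bits of n index the 2048-entry table; two 32-bit indices are extracted from each 64-bit lane.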
    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

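    // Reconstruct s := 2**n by adding the exponent adjustment e to the bits of the table value.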
    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
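    // Subtract the magic bias to recover n as a regular floating-point value.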
    vn = vsubq_f32(vn, vmagic_bias);
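    // t := z + n * log(2) is the remainder of the range reduction; the non-FMA variant splits log(2) into
    // high and low parts for extra precision.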
    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vln2_lo);

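    // exp(-z) ~= s * (1 + t * c1): degree-1 polynomial correction in the small remainder t.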
    const float32x4_t vp = vmulq_f32(vt, vc1);

    const float32x4_t vy = ${VMULADDQ_F32}(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

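    // f := exp(-z) / (exp(-z) + 1). DIV_ALGO selects either a true division or a reciprocal estimate refined
    // by two Newton-Raphson steps (VRECPS steps, FMA steps, or one of each).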
    $if DIV_ALGO == "div":
      float32x4_t vf = vdivq_f32(vy, vd);
    $else:
      float32x4_t vr = vrecpeq_f32(vd);
      $if DIV_ALGO == "nr2fma":
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
      $else:
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $if DIV_ALGO == "nr2recps":
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $else:
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

      float32x4_t vf = vmulq_f32(vy, vr);
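    // For |x| above the cutoff exp(-|x|) is denormal or zero, so force f to 0 there; then select f for
    // negative inputs and 1 - f for non-negative inputs.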
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
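  // Handle the remaining 1-3 elements: a full 4-element vector is loaded (the kernel is declared XNN_OOB_READS,
  // so reading past the end of the input is permitted) and only the valid lanes are stored.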
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = ${VMULADDQ_F32}(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 12);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_2048[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    $if FMA:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2);
    $else:
      float32x4_t vt = ${VMULADDQ_F32}(vz, vn, vln2_hi);
      vt = ${VMULADDQ_F32}(vt, vn, vln2_lo);

    const float32x4_t vp = vmulq_f32(vt, vc1);

    const float32x4_t vy = ${VMULADDQ_F32}(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    $if DIV_ALGO == "div":
      float32x4_t vf = vdivq_f32(vy, vd);
    $else:
      float32x4_t vr = vrecpeq_f32(vd);
      $if DIV_ALGO == "nr2fma":
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));
      $else:
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $if DIV_ALGO == "nr2recps":
        vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
      $else:
        vr = vfmaq_f32(vr, vr, vfmsq_f32(vone, vr, vd));

      float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}
