// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

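  // General structure of the algorithm:
  //
  //          / exp(x) / (1 + exp(x)) if x <= 0
  //  f(x) :=
  //          \ 1 - f(-x) if x >= 0
  //
  // Each batch computes f(-z) = exp(-z) / (1 + exp(-z)) for z := abs(x),
  // then selects f(-z) for negative lanes and 1 - f(-z) for the rest.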
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;

    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);
    const float32x4_t vz89AB = vabsq_f32(vx89AB);
    const float32x4_t vzCDEF = vabsq_f32(vxCDEF);

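    // Compute reduced argument n := round(-z * 64 / log(2)): scale by
    // minus_log2e == -64 / log(2) and add the large magic bias so that the
    // rounded result lands in the low bits of the float's mantissa.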
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);
    float32x4_t vn89AB = vmlaq_f32(vmagic_bias, vz89AB, vminus_log2e);
    float32x4_t vnCDEF = vmlaq_f32(vmagic_bias, vzCDEF, vminus_log2e);

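    // Shift the mantissa bits of n left by 17 (== 23 - 6) so that bits 6 and
    // up of the integer n land in the floating-point exponent field, forming
    // the exponent adjustment e for the final scale value s.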
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);
    const int32x4_t veCDEF = vshlq_n_s32(vreinterpretq_s32_f32(vnCDEF), 17);

    // Use the low 6 bits of n, as an integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));
    const uint64x2_t vidxCDEF = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vnCDEF), vindex_mask));

    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);
    const uint64_t vidxCD = vgetq_lane_u64(vidxCDEF, 0);
    const uint64_t vidxEF = vgetq_lane_u64(vidxCDEF, 1);
    float32x2_t vlCD = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxCD]);
    float32x2_t vlEF = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxEF]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);
    vlCD = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxCD >> 32)], vlCD, 1);
    vlEF = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxEF >> 32)], vlEF, 1);
    const float32x4_t vlCDEF = vcombine_f32(vlCD, vlEF);

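    // Adjust the exponent of the looked-up value l to reconstruct
    // s := 2**(n / 64), an approximation of exp(-z) up to the polynomial
    // correction below.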
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));
    const float32x4_t vsCDEF = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vlCDEF), veCDEF));

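    // Subtract the magic bias to recover n as a regular float.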
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);
    vnCDEF = vsubq_f32(vnCDEF, vmagic_bias);

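    // Compute reduced argument t := z + n * (log(2) / 64) (n is negative, so
    // t is the small remainder of the range reduction), with log(2) / 64
    // split into ln2_hi + ln2_lo for extra precision (Cody-Waite reduction).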
    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);
    float32x4_t vt89AB = vmlaq_f32(vz89AB, vn89AB, vln2_hi);
    float32x4_t vtCDEF = vmlaq_f32(vzCDEF, vnCDEF, vln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);
    vt89AB = vmlaq_f32(vt89AB, vn89AB, vln2_lo);
    vtCDEF = vmlaq_f32(vtCDEF, vnCDEF, vln2_lo);

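    // Compute degree-2 polynomial approximation of exp(-t) on
    // [-log(2)/128, log(2)/128]:
    //   exp(-t) ~= 1 - t + c2 * t**2 == 1 - p, where p := t - c2 * t**2.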
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);
    float32x4_t vpCDEF = vmulq_f32(vtCDEF, vc2);

    vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
    vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);
    vp89AB = vmlsq_f32(vt89AB, vp89AB, vt89AB);
    vpCDEF = vmlsq_f32(vtCDEF, vpCDEF, vtCDEF);

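    // Reconstruct exp(-z) := s * (1 - p) == s - s * p.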
    const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);
    const float32x4_t vy89AB = vmlsq_f32(vs89AB, vs89AB, vp89AB);
    const float32x4_t vyCDEF = vmlsq_f32(vsCDEF, vsCDEF, vpCDEF);

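    // Denominator of the sigmoid fraction: d := 1 + exp(-z).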
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
    const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);
    const float32x4_t vdCDEF = vaddq_f32(vyCDEF, vone);

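    // Compute an initial reciprocal estimate r ~= 1 / d (VRECPE), then refine
    // it with two Newton-Raphson iterations r <- r * (2 - d * r); VRECPS
    // computes the (2 - d * r) factor.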
    float32x4_t vr0123 = vrecpeq_f32(vd0123);
    float32x4_t vr4567 = vrecpeq_f32(vd4567);
    float32x4_t vr89AB = vrecpeq_f32(vd89AB);
    float32x4_t vrCDEF = vrecpeq_f32(vdCDEF);

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
    vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
    vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));
    vr89AB = vmulq_f32(vr89AB, vrecpsq_f32(vr89AB, vd89AB));
    vrCDEF = vmulq_f32(vrCDEF, vrecpsq_f32(vrCDEF, vdCDEF));

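    // Reconstruct sigmoid(-z) := exp(-z) / (1 + exp(-z)).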
    float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
    float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);
    float32x4_t vf89AB = vmulq_f32(vy89AB, vr89AB);
    float32x4_t vfCDEF = vmulq_f32(vyCDEF, vrCDEF);

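    // For |x| > denorm_cutoff, exp(-z) is subnormal, so sigmoid(-z) is
    // flushed to +0.0f by clearing those lanes with a bitmask.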
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));
    vfCDEF = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vfCDEF), vcagtq_f32(vxCDEF, vdenorm_cutoff)));

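    // sigmoid(x) == sigmoid(-z) for negative x and 1 - sigmoid(-z) otherwise:
    // select per lane based on the sign of x.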
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));
    const uint32x4_t vmCDEF = vcltq_f32(vxCDEF, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
    vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));
    vfCDEF = vbslq_f32(vmCDEF, vfCDEF, vsubq_f32(vone, vfCDEF));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
    vst1q_f32(y, vf89AB); y += 4;
    vst1q_f32(y, vfCDEF); y += 4;
  }
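  // Process the remaining full batches of 4 elements with the same algorithm.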
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
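  // Handle the last 1-3 elements: compute a full vector of 4 results (the
  // kernel is declared XNN_OOB_READS, so loading past the end of x is
  // permitted) and store only the valid lanes.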
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

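    // Store 2 lanes and/or 1 lane, depending on the remainder count.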
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}