// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_div_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

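  // General structure of the algorithm:
  //
  //           / exp(x) / (1 + exp(x)) if x <= 0
  //   f(x) :=
  //           \ 1 - f(-x) if x >= 0
  //
  // The loops below first compute f(-z) := exp(-z) / (1 + exp(-z)) with z := abs(x),
  // then select f(-z) for negative x and 1 - f(-z) otherwise.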
  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;

    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);
    const float32x4_t vz89AB = vabsq_f32(vx89AB);

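    // Compute reduced argument n := round(-z / log(2), 6), i.e. -z / log(2) rounded to a multiple of 1/64.
    // Adding the large magic bias forces this rounding and leaves the scaled fixed-point value of n in the
    // low bits of the float representation; the bias is subtracted back out further down.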
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);
    float32x4_t vn89AB = vfmaq_f32(vmagic_bias, vz89AB, vminus_log2e);

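    // Build the scale s := 2**n in two pieces. First shift the integer part of n out of the low mantissa
    // bits and into the floating-point exponent field (bit 23 == 6 index bits + 17 shifted bits); it is
    // added to the table value below.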
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);
    const int32x4_t ve89AB = vshlq_n_s32(vreinterpretq_s32_f32(vn89AB), 17);

    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));
    const uint64x2_t vidx89AB = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn89AB), vindex_mask));

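    // NEON has no gather instruction, so extract the indices pairwise as 64-bit lanes and load the table
    // entries one lane at a time.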
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);
    const uint64_t vidx89 = vgetq_lane_u64(vidx89AB, 0);
    const uint64_t vidxAB = vgetq_lane_u64(vidx89AB, 1);
    float32x2_t vl89 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx89]);
    float32x2_t vlAB = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidxAB]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);
    vl89 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx89 >> 32)], vl89, 1);
    vlAB = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidxAB >> 32)], vlAB, 1);
    const float32x4_t vl89AB = vcombine_f32(vl89, vlAB);

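    // Combine the table value l (the fractional part of the exponent) with e (the integer part) to
    // reconstruct s := 2**n.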
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));
    const float32x4_t vs89AB = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl89AB), ve89AB));

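    // Subtract the magic bias to recover n as a regular floating-point value.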
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);
    vn89AB = vsubq_f32(vn89AB, vmagic_bias);

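    // Compute the reduced argument t := z + n * log(2), the small residual of the argument reduction,
    // so that exp(-z) == s * exp(-t).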
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);
    float32x4_t vt89AB = vfmaq_f32(vz89AB, vn89AB, vln2);

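    // Approximate exp(-t) on the reduction interval with a degree-2 polynomial:
    //   exp(-t) ~= 1 - t + c2 * t**2 == 1 - p, where p := t - c2 * t**2 is evaluated in two steps below.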
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);
    float32x4_t vp89AB = vmulq_f32(vt89AB, vc2);

    vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
    vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);
    vp89AB = vfmsq_f32(vt89AB, vp89AB, vt89AB);

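    // Reconstruct y := exp(-z) ~= s * (1 - p) = s - s * p.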
    const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);
    const float32x4_t vy89AB = vfmsq_f32(vs89AB, vs89AB, vp89AB);

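    // Compute f := exp(-z) / (1 + exp(-z)) = y / (y + 1).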
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);
    const float32x4_t vd89AB = vaddq_f32(vy89AB, vone);

    float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
    float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);
    float32x4_t vf89AB = vdivq_f32(vy89AB, vd89AB);

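    // Where |x| exceeds the denormal cutoff, exp(-z) underflows, so force f to +0.0f; the sign-based
    // selection below then produces exactly 0 for large negative x and exactly 1 for large positive x.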
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));
    vf89AB = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf89AB), vcagtq_f32(vx89AB, vdenorm_cutoff)));

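    // Apply the identity sigmoid(x) == 1 - sigmoid(-x): keep f where x < 0, use 1 - f otherwise.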
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));
    const uint32x4_t vm89AB = vcltq_f32(vx89AB, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));
    vf89AB = vbslq_f32(vm89AB, vf89AB, vsubq_f32(vone, vf89AB));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
    vst1q_f32(y, vf89AB); y += 4;
  }
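  // Process any remaining full 4-element vectors with the same algorithm.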
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vf = vdivq_f32(vy, vd);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
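  // Process the final 1-3 elements. A full vector is loaded (the kernel is declared XNN_OOB_READS, so
  // reading a few bytes past the end of the input is permitted), but only the remaining elements are stored.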
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vf = vdivq_f32(vy, vd);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}