// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

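// 64-entry lookup table for the 2**(-k/64) terms used in the exp() evaluation.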
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

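// Computes y := sigmoid(x) = 1 / (1 + exp(-x)) elementwise, 8 floats per main-loop
// iteration. exp(-z) for z = |x| is evaluated with a 64-entry lookup table plus a
// degree-2 polynomial ("lut64_p2"), the division uses two Newton-Raphson reciprocal
// refinements ("nr2recps"), and the identity sigmoid(x) = 1 - sigmoid(-x) restores
// results for non-negative inputs.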
void xnn_f32_vsigmoid_ukernel__neon_rr2_lut64_p2_nr2recps_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neon_rr2_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2_hi = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_hi);
  const float32x4_t vln2_lo = vld1q_dup_f32(&params->neon_rr2_lut64_p2.ln2_lo);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_lut64_p2.denorm_cutoff);

  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;

    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);

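    // Compute n := -z * log2(e), rounded to the nearest multiple of 1/64 by the
    // magic-bias trick: after adding the large bias, the quantized value occupies
    // the low mantissa bits of vn.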
    float32x4_t vn0123 = vmlaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vmlaq_f32(vmagic_bias, vz4567, vminus_log2e);

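    // Shift the fixed-point bits of n so that its integer part lines up with the
    // floating-point exponent field; ve is added to the table value below to
    // apply the 2**e scale.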
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);

    // Use bits 0:6 of n, as an integer, as an index for table lookup of l := 2**(n % 64).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));

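    // Gather the four table entries per vector: each 64-bit lane holds two 32-bit
    // indices, extracted as the low half and the high half (>> 32).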
    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

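    // Reconstruct s := 2**n by adding the exponent adjustment to the bit
    // representation of the looked-up value (integer addition on float bits).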
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

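    // Subtract the magic bias to recover n as a regular float.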
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);

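    // Compute the reduced argument t := z + n * log(2), with log(2) split into
    // high and low parts (the "rr2" two-step reduction) for extra accuracy.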
    float32x4_t vt0123 = vmlaq_f32(vz0123, vn0123, vln2_hi);
    float32x4_t vt4567 = vmlaq_f32(vz4567, vn4567, vln2_hi);

    vt0123 = vmlaq_f32(vt0123, vn0123, vln2_lo);
    vt4567 = vmlaq_f32(vt4567, vn4567, vln2_lo);

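    // Degree-2 polynomial ("p2"): p := t - c2 * t**2, so that exp(-t) ~ 1 - p
    // on the reduced range.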
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);

    vp0123 = vmlsq_f32(vt0123, vp0123, vt0123);
    vp4567 = vmlsq_f32(vt4567, vp4567, vt4567);

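    // Reconstruct exp(-z) := s * (1 - p) = s - s * p.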
    const float32x4_t vy0123 = vmlsq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vmlsq_f32(vs4567, vs4567, vp4567);

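    // Denominator of the sigmoid: d := exp(-z) + 1.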
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);

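    // Compute r ~ 1/d: a reciprocal estimate (VRECPE) refined by two
    // Newton-Raphson steps (VRECPS), the "nr2recps" of the kernel name.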
    float32x4_t vr0123 = vrecpeq_f32(vd0123);
    float32x4_t vr4567 = vrecpeq_f32(vd4567);

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));

    vr0123 = vmulq_f32(vr0123, vrecpsq_f32(vr0123, vd0123));
    vr4567 = vmulq_f32(vr4567, vrecpsq_f32(vr4567, vd4567));

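    // f := exp(-z) / (1 + exp(-z)) = sigmoid(-z).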
    float32x4_t vf0123 = vmulq_f32(vy0123, vr0123);
    float32x4_t vf4567 = vmulq_f32(vy4567, vr4567);

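    // For |x| beyond the denormal cutoff, exp(-z) underflows and the computed f
    // is no longer reliable; flush it to 0, the limit of sigmoid(-z) as z grows.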
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));

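    // Undo the |x| reduction: sigmoid(x) = f for x < 0, and 1 - f otherwise.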
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
  }
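  // Same computation for a remaining batch of 4 elements.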
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
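  // Handle the final 1 to 3 elements: a full vector is loaded (the out-of-bounds
  // read is permitted by the XNN_OOB_READS annotation), but only valid lanes are
  // stored.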
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vmlaq_f32(vz, vn, vln2_hi);
    vt = vmlaq_f32(vt, vn, vln2_lo);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vmlsq_f32(vt, vp, vt);

    const float32x4_t vy = vmlsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vr = vrecpeq_f32(vd);
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));
    vr = vmulq_f32(vr, vrecpsq_f32(vr, vd));

    float32x4_t vf = vmulq_f32(vy, vr);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

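    // Partial store: write 2 lanes and/or 1 lane according to the bits of n.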
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}