// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/neon-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


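// 64-entry lookup table; entry k is derived from exp2(-k / 64), as the table name suggests.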
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];

void xnn_f32_vsigmoid_ukernel__neonfma_rr1_lut64_p2_div_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.magic_bias);
  const float32x4_t vminus_log2e = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_log2e);
  const int32x4_t vindex_mask = vmovq_n_s32(INT32_C(0x3F));
  const float32x4_t vln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.ln2);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.c2);
  const float32x4_t vone = vmovq_n_f32(1.0f);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.denorm_cutoff);

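  // General structure of the algorithm:
  //
  //           / exp(x) / (1 + exp(x)) if x <= 0
  //   f(x) :=
  //           \ 1 - f(-x)             if x >= 0
  //
  // The loops below evaluate f on z := |x|, i.e. compute exp(-z) / (1 + exp(-z)),
  // and reconstruct the sign at the very end.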
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;

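    // z := |x|, so that exp(-z) stays in (0, 1] and never overflows.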
    const float32x4_t vz0123 = vabsq_f32(vx0123);
    const float32x4_t vz4567 = vabsq_f32(vx4567);

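    // Compute the reduced argument n := -z / log(2), rounded to the nearest
    // multiple of 1/64: adding the large magic bias rounds the FMA result to
    // 6 fractional bits, which land in the low bits of the float representation of n.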
    float32x4_t vn0123 = vfmaq_f32(vmagic_bias, vz0123, vminus_log2e);
    float32x4_t vn4567 = vfmaq_f32(vmagic_bias, vz4567, vminus_log2e);

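    // Shift the bits of n left by 17 = 23 - 6, so that its integer part lines
    // up with the floating-point exponent field; e is added to the bit
    // pattern of the table value below to form the scale s := 2**n.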
    const int32x4_t ve0123 = vshlq_n_s32(vreinterpretq_s32_f32(vn0123), 17);
    const int32x4_t ve4567 = vshlq_n_s32(vreinterpretq_s32_f32(vn4567), 17);

    // Use the low 6 bits of n, as an integer, as an index for the table lookup of l := 2**frac(n).
    const uint64x2_t vidx0123 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn0123), vindex_mask));
    const uint64x2_t vidx4567 = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn4567), vindex_mask));

    const uint64_t vidx01 = vgetq_lane_u64(vidx0123, 0);
    const uint64_t vidx23 = vgetq_lane_u64(vidx0123, 1);
    float32x2_t vl01 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx01]);
    float32x2_t vl23 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx23]);
    const uint64_t vidx45 = vgetq_lane_u64(vidx4567, 0);
    const uint64_t vidx67 = vgetq_lane_u64(vidx4567, 1);
    float32x2_t vl45 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx45]);
    float32x2_t vl67 = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx67]);

    vl01 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx01 >> 32)], vl01, 1);
    vl23 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx23 >> 32)], vl23, 1);
    const float32x4_t vl0123 = vcombine_f32(vl01, vl23);
    vl45 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx45 >> 32)], vl45, 1);
    vl67 = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx67 >> 32)], vl67, 1);
    const float32x4_t vl4567 = vcombine_f32(vl45, vl67);

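    // Reconstruct the scale s := 2**n by integer-adding the pre-shifted
    // exponent bits e to the bit pattern of the table value l.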
    const float32x4_t vs0123 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl0123), ve0123));
    const float32x4_t vs4567 = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl4567), ve4567));

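    // Subtract the magic bias to recover the reduced argument n as a regular float.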
    vn0123 = vsubq_f32(vn0123, vmagic_bias);
    vn4567 = vsubq_f32(vn4567, vmagic_bias);

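    // Compute the residual t := z + n * log(2); |t| is at most log(2) / 128.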
    float32x4_t vt0123 = vfmaq_f32(vz0123, vn0123, vln2);
    float32x4_t vt4567 = vfmaq_f32(vz4567, vn4567, vln2);

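    // Approximate exp(-t) on [-log(2)/128, log(2)/128] with the degree-2
    // polynomial 1 - t + c2 * t**2 = 1 - p, where p := t - c2 * t**2.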
    float32x4_t vp0123 = vmulq_f32(vt0123, vc2);
    float32x4_t vp4567 = vmulq_f32(vt4567, vc2);

    vp0123 = vfmsq_f32(vt0123, vp0123, vt0123);
    vp4567 = vfmsq_f32(vt4567, vp4567, vt4567);

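    // Reconstruct exp(-z) ~= s * (1 - p), evaluated as s - s * p.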
    const float32x4_t vy0123 = vfmsq_f32(vs0123, vs0123, vp0123);
    const float32x4_t vy4567 = vfmsq_f32(vs4567, vs4567, vp4567);

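    // Compute the denominator d := exp(-z) + 1, then sigmoid(-z) = y / d.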
    const float32x4_t vd0123 = vaddq_f32(vy0123, vone);
    const float32x4_t vd4567 = vaddq_f32(vy4567, vone);

    float32x4_t vf0123 = vdivq_f32(vy0123, vd0123);
    float32x4_t vf4567 = vdivq_f32(vy4567, vd4567);

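    // For |x| above the cutoff, exp(-|x|) is subnormal or zero, so flush the
    // quotient to 0 (vcagtq_f32 compares absolute values).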
    vf0123 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf0123), vcagtq_f32(vx0123, vdenorm_cutoff)));
    vf4567 = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf4567), vcagtq_f32(vx4567, vdenorm_cutoff)));

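    // sigmoid(x) equals f for negative x and 1 - f for positive x; a flushed
    // f = 0 thus yields 0 on the far negative side and 1 on the far positive side.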
    const uint32x4_t vm0123 = vcltq_f32(vx0123, vmovq_n_f32(0.0f));
    const uint32x4_t vm4567 = vcltq_f32(vx4567, vmovq_n_f32(0.0f));

    vf0123 = vbslq_f32(vm0123, vf0123, vsubq_f32(vone, vf0123));
    vf4567 = vbslq_f32(vm4567, vf4567, vsubq_f32(vone, vf4567));

    vst1q_f32(y, vf0123); y += 4;
    vst1q_f32(y, vf4567); y += 4;
  }
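  // Same computation as the main loop above, one 4-element vector at a time.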
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vf = vdivq_f32(vy, vd);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

    vst1q_f32(y, vf); y += 4;
  }
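  // Remainder of 1-3 elements: load a full vector (the XNN_OOB_READS
  // annotation on the kernel covers the read past the end of x), compute as
  // usual, and store only the valid lanes.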
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);

    const float32x4_t vz = vabsq_f32(vx);

    float32x4_t vn = vfmaq_f32(vmagic_bias, vz, vminus_log2e);
    const int32x4_t ve = vshlq_n_s32(vreinterpretq_s32_f32(vn), 17);

    const uint64x2_t vidx = vreinterpretq_u64_s32(vandq_s32(vreinterpretq_s32_f32(vn), vindex_mask));
    const uint64_t vidx_lo = vgetq_lane_u64(vidx, 0);
    const uint64_t vidx_hi = vgetq_lane_u64(vidx, 1);
    float32x2_t vl_lo = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_lo]);
    float32x2_t vl_hi = vld1_dup_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) vidx_hi]);
    vl_lo = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_lo >> 32)], vl_lo, 1);
    vl_hi = vld1_lane_f32(&xnn_table_exp2minus_k_over_64[(uint32_t) (vidx_hi >> 32)], vl_hi, 1);
    const float32x4_t vl = vcombine_f32(vl_lo, vl_hi);

    const float32x4_t vs = vreinterpretq_f32_s32(vaddq_s32(vreinterpretq_s32_f32(vl), ve));
    vn = vsubq_f32(vn, vmagic_bias);
    float32x4_t vt = vfmaq_f32(vz, vn, vln2);

    float32x4_t vp = vmulq_f32(vt, vc2);
    vp = vfmsq_f32(vt, vp, vt);

    const float32x4_t vy = vfmsq_f32(vs, vs, vp);
    const float32x4_t vd = vaddq_f32(vy, vone);

    float32x4_t vf = vdivq_f32(vy, vd);
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcagtq_f32(vx, vdenorm_cutoff)));
    const uint32x4_t vm = vcltq_f32(vx, vmovq_n_f32(0.0f));
    vf = vbslq_f32(vm, vf, vsubq_f32(vone, vf));

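    // Store 2 and/or 1 trailing elements, depending on the low bits of n.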
    float32x2_t vf_lo = vget_low_f32(vf);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vf_lo); y += 2;
      vf_lo = vget_high_f32(vf);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vf_lo, 0);
    }
  }
}
