// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/avx2-rr1-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

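// Vectorized sigmoid, f(x) = 1 / (1 + exp(-x)), evaluated as e / (e + 1)
// with e = exp(z) for z = -|x|, then reconstructed as 1 - f for
// non-negative inputs. Per the kernel name: rr1 = range reduction with a
// single (non-split) ln2 constant, p5 = degree-5 polynomial, div = a plain
// division for the final ratio, x32 = 32 elements per main-loop iteration.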
void xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x32(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p5.sign_mask);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vone = _mm256_load_ps(params->avx2_rr1_p5.one);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    x += 32;

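    // z = -|x|: OR-ing with the sign mask sets the sign bit, so the
    // exponential below is only ever evaluated for non-positive arguments.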
    const __m256 vz0 = _mm256_or_ps(vx0, vsign_mask);
    const __m256 vz1 = _mm256_or_ps(vx1, vsign_mask);
    const __m256 vz2 = _mm256_or_ps(vx2, vsign_mask);
    const __m256 vz3 = _mm256_or_ps(vx3, vsign_mask);

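    // n = round(z / ln2): adding the large magic bias pushes the scaled
    // input into a range where the integer part lands in the low mantissa
    // bits, rounding to nearest in the process.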
    __m256 vn0 = _mm256_fmadd_ps(vz0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vz1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vz2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vz3, vlog2e, vmagic_bias);

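    // s = 2**n: shift the low bits of vn into the exponent field; the magic
    // bias is assumed to fold in the exponent bias (as is standard for
    // XNNPACK's rr1 sigmoid variants), so no further adjustment is needed.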
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));

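    // Subtract the magic bias back out to recover n as a float.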
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);

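    // t = z - n * ln2: the reduced argument, so that exp(z) = s * exp(t).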
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vz0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vz1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vz2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vz3);

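    // Evaluate the degree-5 polynomial by Horner's scheme:
    // p = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))), chosen so that
    // exp(t) ~ 1 + t * p on the reduced range.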
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);

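    // Reconstruct e = exp(z) = s * (1 + t * p), computed as s + (s * t) * p
    // so the final step is a single FMA.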
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);

    const __m256 ve0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    const __m256 ve1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    const __m256 ve2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    const __m256 ve3 = _mm256_fmadd_ps(vt3, vp3, vs3);

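    // sigmoid(z) = e / (1 + e) for the non-positive z used here.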
    const __m256 vd0 = _mm256_add_ps(ve0, vone);
    const __m256 vd1 = _mm256_add_ps(ve1, vone);
    const __m256 vd2 = _mm256_add_ps(ve2, vone);
    const __m256 vd3 = _mm256_add_ps(ve3, vone);

    __m256 vf0 = _mm256_div_ps(ve0, vd0);
    __m256 vf1 = _mm256_div_ps(ve1, vd1);
    __m256 vf2 = _mm256_div_ps(ve2, vd2);
    __m256 vf3 = _mm256_div_ps(ve3, vd3);

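    // For z below the denormal cutoff exp(z) underflows, so force the output
    // to 0 (the mathematically correct limit) via the comparison mask.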
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vz0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vz2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vz3, vdenorm_cutoff, _CMP_LT_OS), vf3);

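    // _mm256_blendv_ps selects on the sign bit of vx: negative inputs keep
    // f = sigmoid(-|x|) = sigmoid(x), non-negative inputs get 1 - f.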
    vf0 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf0), vf0, vx0);
    vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1);
    vf2 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf2), vf2, vx2);
    vf3 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf3), vf3, vx3);

    _mm256_storeu_ps(y, vf0);
    _mm256_storeu_ps(y + 8, vf1);
    _mm256_storeu_ps(y + 16, vf2);
    _mm256_storeu_ps(y + 24, vf3);
    y += 32;
  }
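  // Process remaining full vectors of 8 elements with the same computation.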
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vf = _mm256_div_ps(ve, vd);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

    _mm256_storeu_ps(y, vf);
    y += 8;
  }
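  // Handle the final 1-7 elements with a masked load and partial stores.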
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
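    // mask_table is assumed to hold seven -1 words followed by seven 0 words
    // (as elsewhere in XNNPACK), so loading n bytes before its midpoint
    // yields one all-ones lane per remaining element.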
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_or_ps(vx, vsign_mask);

    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    const __m256 ve = _mm256_fmadd_ps(vt, vp, vs);

    const __m256 vd = _mm256_add_ps(ve, vone);
    __m256 vf = _mm256_div_ps(ve, vd);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf);
    vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx);

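    // Store the surviving 4 / 2 / 1 lanes, shifting consumed lanes out of
    // vf_lo between steps.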
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vf_lo);
    }
  }
}