// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/raddstoreexpminusmax.h>

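// Computes f[i] = exp(input[i] - max) for each element, stores the results to
// output, and accumulates their sum into *sum (the building blocks of a
// numerically stable softmax). Per the XNNPACK naming scheme, "x64" means the
// main loop consumes 64 elements per iteration and "acc2" that the sum is
// split across 2 independent accumulators.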
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64_acc2(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(elements % sizeof(float) == 0);

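  // Broadcast the row maximum and load the exp() approximation constants
  // (log2(e), the rounding magic bias, -ln(2), polynomial coefficients
  // c5..c1, and the underflow cutoff) from the prepared parameter structure.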
  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

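  // Two independent accumulators: alternating between them halves the length
  // of the floating-point addition dependency chain in the main loop.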
  __m256 vacc0 = _mm256_setzero_ps();
  __m256 vacc1 = _mm256_setzero_ps();
  for (; elements >= 64 * sizeof(float); elements -= 64 * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;

    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);

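    // Range reduction: n := round(x * log2(e)). Adding the large magic bias
    // pushes the integer part into the low mantissa bits, so the FMA performs
    // the rounding for free.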
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);

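    // s := 2^n, built by shifting n's integer bits from the mantissa into the
    // floating-point exponent field.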
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));

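    // Subtract the magic bias to recover n as a float for the reduction below.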
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);

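    // Reduced argument t := x - n * ln(2), computed with a single -ln(2)
    // constant (the "rr1" variant) fused into one FMA.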
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);

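    // Horner's scheme: p := (((c5*t + c4)*t + c3)*t + c2)*t + c1. Together
    // with the final 1 + t*p(t) step below, this forms the degree-5 ("p5")
    // polynomial approximation of exp(t) on the reduced range.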
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);

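    // Reconstruct: f := s + (t*s) * p(t) = s * (1 + t*p(t)) ~= 2^n * exp(t)
    // = exp(x).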
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);

    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);

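    // Flush results to zero where x is below the cutoff: there exp(x) would
    // underflow to a denormal, so zero is returned instead.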
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);

    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    _mm256_storeu_ps(output + 16, vf2);
    _mm256_storeu_ps(output + 24, vf3);
    _mm256_storeu_ps(output + 32, vf4);
    _mm256_storeu_ps(output + 40, vf5);
    _mm256_storeu_ps(output + 48, vf6);
    _mm256_storeu_ps(output + 56, vf7);
    output += 64;

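    // Accumulate the 64 exponentials, alternating between the two
    // accumulators to keep the addition chains independent.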
    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc1 = _mm256_add_ps(vacc1, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc1 = _mm256_add_ps(vacc1, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc1 = _mm256_add_ps(vacc1, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc1 = _mm256_add_ps(vacc1, vf7);
  }
  vacc0 = _mm256_add_ps(vacc0, vacc1);

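  // Process any remaining full 8-element vectors with the same algorithm.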
  __m256 vacc = vacc0;
  for (; elements >= 8 * sizeof(float); elements -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    _mm256_storeu_ps(output, vf);
    output += 8;

    vacc = _mm256_add_ps(vacc, vf);
  }
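  // Handle the final 1-7 elements: slide a window over the mask table so the
  // first (elements / sizeof(float)) lanes are all-ones, then use a masked
  // load, store the result piecewise, and mask the sum contribution likewise.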
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - elements));

    const __m256 vi = _mm256_maskload_ps(input, vmask);

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (elements & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (elements & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (elements & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }

    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
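  // Horizontal reduction: fold the 8 accumulator lanes to a single scalar
  // (high 128-bit half + low half, then pairwise within 128 bits).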
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
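  // Clear the upper halves of the YMM registers to avoid AVX/SSE transition
  // penalties in subsequent SSE code.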
  _mm256_zeroupper();
}