xref: /aosp_15_r20/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/avx2-rr1-p5-x64.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/avx2-rr1-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/raddstoreexpminusmax.h>


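// Computes f[i] := exp(input[i] - max) for each of the elements/sizeof(float) inputs,
// stores f to output, and writes the sum of all f[i] to *sum (the exp-and-reduce step
// of a numerically stable softmax).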
void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x64(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(elements % sizeof(float) == 0);

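  // Broadcast the maximum input value and load the pre-computed approximation constants from params.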
  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

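  // Main loop: process 64 elements (8 AVX2 vectors of 8 floats) per iteration,
  // accumulating the partial sums of the exponentials in vacc0.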
  __m256 vacc0 = _mm256_setzero_ps();
  for (; elements >= 64 * sizeof(float); elements -= 64 * sizeof(float)) {
    const __m256 vi0 = _mm256_loadu_ps(input);
    const __m256 vi1 = _mm256_loadu_ps(input + 8);
    const __m256 vi2 = _mm256_loadu_ps(input + 16);
    const __m256 vi3 = _mm256_loadu_ps(input + 24);
    const __m256 vi4 = _mm256_loadu_ps(input + 32);
    const __m256 vi5 = _mm256_loadu_ps(input + 40);
    const __m256 vi6 = _mm256_loadu_ps(input + 48);
    const __m256 vi7 = _mm256_loadu_ps(input + 56);
    input += 64;

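    // Subtract the maximum: x := i - max, so every argument to exp() is <= 0 and cannot overflow.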
    const __m256 vx0 = _mm256_sub_ps(vi0, vi_max);
    const __m256 vx1 = _mm256_sub_ps(vi1, vi_max);
    const __m256 vx2 = _mm256_sub_ps(vi2, vi_max);
    const __m256 vx3 = _mm256_sub_ps(vi3, vi_max);
    const __m256 vx4 = _mm256_sub_ps(vi4, vi_max);
    const __m256 vx5 = _mm256_sub_ps(vi5, vi_max);
    const __m256 vx6 = _mm256_sub_ps(vi6, vi_max);
    const __m256 vx7 = _mm256_sub_ps(vi7, vi_max);

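    // Compute n := round(x / log(2)) by multiplying by log2(e) and adding a large "magic bias";
    // the rounded integer lands in the low mantissa bits of vn.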
    __m256 vn0 = _mm256_fmadd_ps(vx0, vlog2e, vmagic_bias);
    __m256 vn1 = _mm256_fmadd_ps(vx1, vlog2e, vmagic_bias);
    __m256 vn2 = _mm256_fmadd_ps(vx2, vlog2e, vmagic_bias);
    __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias);
    __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);
    __m256 vn5 = _mm256_fmadd_ps(vx5, vlog2e, vmagic_bias);
    __m256 vn6 = _mm256_fmadd_ps(vx6, vlog2e, vmagic_bias);
    __m256 vn7 = _mm256_fmadd_ps(vx7, vlog2e, vmagic_bias);

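    // Build the scale factor s := 2**n by shifting the low bits of vn into the floating-point exponent field.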
    const __m256 vs0 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn0), 23));
    const __m256 vs1 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn1), 23));
    const __m256 vs2 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn2), 23));
    const __m256 vs3 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn3), 23));
    const __m256 vs4 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn4), 23));
    const __m256 vs5 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn5), 23));
    const __m256 vs6 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn6), 23));
    const __m256 vs7 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn7), 23));

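    // Subtract the magic bias to recover n as an ordinary floating-point value.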
    vn0 = _mm256_sub_ps(vn0, vmagic_bias);
    vn1 = _mm256_sub_ps(vn1, vmagic_bias);
    vn2 = _mm256_sub_ps(vn2, vmagic_bias);
    vn3 = _mm256_sub_ps(vn3, vmagic_bias);
    vn4 = _mm256_sub_ps(vn4, vmagic_bias);
    vn5 = _mm256_sub_ps(vn5, vmagic_bias);
    vn6 = _mm256_sub_ps(vn6, vmagic_bias);
    vn7 = _mm256_sub_ps(vn7, vmagic_bias);

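    // Compute the reduced argument t := x - n * log(2).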
    __m256 vt0 = _mm256_fmadd_ps(vn0, vminus_ln2, vx0);
    __m256 vt1 = _mm256_fmadd_ps(vn1, vminus_ln2, vx1);
    __m256 vt2 = _mm256_fmadd_ps(vn2, vminus_ln2, vx2);
    __m256 vt3 = _mm256_fmadd_ps(vn3, vminus_ln2, vx3);
    __m256 vt4 = _mm256_fmadd_ps(vn4, vminus_ln2, vx4);
    __m256 vt5 = _mm256_fmadd_ps(vn5, vminus_ln2, vx5);
    __m256 vt6 = _mm256_fmadd_ps(vn6, vminus_ln2, vx6);
    __m256 vt7 = _mm256_fmadd_ps(vn7, vminus_ln2, vx7);

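    // Evaluate the degree-5 polynomial approximation of exp(t) on [-log(2)/2, log(2)/2] with Horner's scheme:
    //   exp(t) ~= 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))) = 1 + t * p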
    __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4);
    __m256 vp1 = _mm256_fmadd_ps(vc5, vt1, vc4);
    __m256 vp2 = _mm256_fmadd_ps(vc5, vt2, vc4);
    __m256 vp3 = _mm256_fmadd_ps(vc5, vt3, vc4);
    __m256 vp4 = _mm256_fmadd_ps(vc5, vt4, vc4);
    __m256 vp5 = _mm256_fmadd_ps(vc5, vt5, vc4);
    __m256 vp6 = _mm256_fmadd_ps(vc5, vt6, vc4);
    __m256 vp7 = _mm256_fmadd_ps(vc5, vt7, vc4);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc3);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc3);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc3);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc3);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc2);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc2);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc2);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc2);

    vp0 = _mm256_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm256_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm256_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm256_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm256_fmadd_ps(vp4, vt4, vc1);
    vp5 = _mm256_fmadd_ps(vp5, vt5, vc1);
    vp6 = _mm256_fmadd_ps(vp6, vt6, vc1);
    vp7 = _mm256_fmadd_ps(vp7, vt7, vc1);

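    // Reconstruct exp(x) = s * (1 + t * p) as s + (t * s) * p.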
    vt0 = _mm256_mul_ps(vt0, vs0);
    vt1 = _mm256_mul_ps(vt1, vs1);
    vt2 = _mm256_mul_ps(vt2, vs2);
    vt3 = _mm256_mul_ps(vt3, vs3);
    vt4 = _mm256_mul_ps(vt4, vs4);
    vt5 = _mm256_mul_ps(vt5, vs5);
    vt6 = _mm256_mul_ps(vt6, vs6);
    vt7 = _mm256_mul_ps(vt7, vs7);

    __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0);
    __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1);
    __m256 vf2 = _mm256_fmadd_ps(vt2, vp2, vs2);
    __m256 vf3 = _mm256_fmadd_ps(vt3, vp3, vs3);
    __m256 vf4 = _mm256_fmadd_ps(vt4, vp4, vs4);
    __m256 vf5 = _mm256_fmadd_ps(vt5, vp5, vs5);
    __m256 vf6 = _mm256_fmadd_ps(vt6, vp6, vs6);
    __m256 vf7 = _mm256_fmadd_ps(vt7, vp7, vs7);

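    // Inputs below the cutoff would produce denormal or zero results; flush those lanes to exact zero.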
    vf0 = _mm256_andnot_ps(_mm256_cmp_ps(vx0, vdenorm_cutoff, _CMP_LT_OS), vf0);
    vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1);
    vf2 = _mm256_andnot_ps(_mm256_cmp_ps(vx2, vdenorm_cutoff, _CMP_LT_OS), vf2);
    vf3 = _mm256_andnot_ps(_mm256_cmp_ps(vx3, vdenorm_cutoff, _CMP_LT_OS), vf3);
    vf4 = _mm256_andnot_ps(_mm256_cmp_ps(vx4, vdenorm_cutoff, _CMP_LT_OS), vf4);
    vf5 = _mm256_andnot_ps(_mm256_cmp_ps(vx5, vdenorm_cutoff, _CMP_LT_OS), vf5);
    vf6 = _mm256_andnot_ps(_mm256_cmp_ps(vx6, vdenorm_cutoff, _CMP_LT_OS), vf6);
    vf7 = _mm256_andnot_ps(_mm256_cmp_ps(vx7, vdenorm_cutoff, _CMP_LT_OS), vf7);

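    // Store the exponentials and add them to the running sum.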
    _mm256_storeu_ps(output, vf0);
    _mm256_storeu_ps(output + 8, vf1);
    _mm256_storeu_ps(output + 16, vf2);
    _mm256_storeu_ps(output + 24, vf3);
    _mm256_storeu_ps(output + 32, vf4);
    _mm256_storeu_ps(output + 40, vf5);
    _mm256_storeu_ps(output + 48, vf6);
    _mm256_storeu_ps(output + 56, vf7);
    output += 64;

    vacc0 = _mm256_add_ps(vacc0, vf0);
    vacc0 = _mm256_add_ps(vacc0, vf1);
    vacc0 = _mm256_add_ps(vacc0, vf2);
    vacc0 = _mm256_add_ps(vacc0, vf3);
    vacc0 = _mm256_add_ps(vacc0, vf4);
    vacc0 = _mm256_add_ps(vacc0, vf5);
    vacc0 = _mm256_add_ps(vacc0, vf6);
    vacc0 = _mm256_add_ps(vacc0, vf7);
  }

  __m256 vacc = vacc0;
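  // Process the remaining full vectors of 8 elements, one vector per iteration, using the same approximation.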
  for (; elements >= 8 * sizeof(float); elements -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    _mm256_storeu_ps(output, vf);
    output += 8;

    vacc = _mm256_add_ps(vacc, vf);
  }
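  // Handle the final 1-7 elements: a mask loaded from a table selects only the valid lanes for the load.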
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - elements));

    const __m256 vi = _mm256_maskload_ps(input, vmask);

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

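    // Store the valid lanes: 4, then 2, then 1 elements at a time, according to the bits of the remaining byte count.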
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (elements & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (elements & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (elements & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }

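    // Zero the lanes past the end of the input before adding this last partial vector to the running sum.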
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
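  // Horizontally reduce the 8 lanes of vacc to a single scalar sum and store it to *sum.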
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
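  // Clear the upper halves of the YMM registers to avoid AVX-SSE transition penalties in SSE code that may follow.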
  _mm256_zeroupper();
}