// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

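  // sigmoid(x) is computed as f = exp(z) / (exp(z) + 1) with z = -|x|, so that
  // exp(z) never overflows; for non-negative x the result is then reflected as
  // 1 - f. exp(z) itself is evaluated as 2**n * p(t), where n = round(z / ln(2)),
  // t = z - n * ln(2) is the reduced argument, and p(t) is a degree-5 polynomial
  // approximation of exp(t) on [-ln(2)/2, ln(2)/2].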
  const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);

  for (; n >= 80 * sizeof(float); n -= 80 * sizeof(float)) {
    const __m512 vx0 = _mm512_loadu_ps(x);
    const __m512 vx1 = _mm512_loadu_ps(x + 16);
    const __m512 vx2 = _mm512_loadu_ps(x + 32);
    const __m512 vx3 = _mm512_loadu_ps(x + 48);
    const __m512 vx4 = _mm512_loadu_ps(x + 64);
    x += 80;

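    // Set the sign bit of each element: z = -|x|, so the argument passed to
    // the exponential is never positive and exp(z) stays in (0, 1].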
    const __m512 vz0 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx0), vsign_mask));
    const __m512 vz1 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx1), vsign_mask));
    const __m512 vz2 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx2), vsign_mask));
    const __m512 vz3 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx3), vsign_mask));
    const __m512 vz4 = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx4), vsign_mask));

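    // Compute n = round(z / ln(2)) in two steps: scale by log2(e), then round.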
    __m512 vn0 = _mm512_mul_ps(vz0, vlog2e);
    __m512 vn1 = _mm512_mul_ps(vz1, vlog2e);
    __m512 vn2 = _mm512_mul_ps(vz2, vlog2e);
    __m512 vn3 = _mm512_mul_ps(vz3, vlog2e);
    __m512 vn4 = _mm512_mul_ps(vz4, vlog2e);

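    // _mm512_roundscale_ps with imm8 = 0 rounds to the nearest integer
    // (ties to even) without scaling.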
    vn0 = _mm512_roundscale_ps(vn0, 0);
    vn1 = _mm512_roundscale_ps(vn1, 0);
    vn2 = _mm512_roundscale_ps(vn2, 0);
    vn3 = _mm512_roundscale_ps(vn3, 0);
    vn4 = _mm512_roundscale_ps(vn4, 0);

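    // Reduced argument: t = z - n * ln(2), with |t| <= ln(2) / 2.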
    __m512 vt0 = _mm512_fmadd_ps(vn0, vminus_ln2, vz0);
    __m512 vt1 = _mm512_fmadd_ps(vn1, vminus_ln2, vz1);
    __m512 vt2 = _mm512_fmadd_ps(vn2, vminus_ln2, vz2);
    __m512 vt3 = _mm512_fmadd_ps(vn3, vminus_ln2, vz3);
    __m512 vt4 = _mm512_fmadd_ps(vn4, vminus_ln2, vz4);

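    // Evaluate the degree-5 polynomial approximation of exp(t) by Horner's
    // scheme: p = 1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))).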
    __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4);
    __m512 vp1 = _mm512_fmadd_ps(vc5, vt1, vc4);
    __m512 vp2 = _mm512_fmadd_ps(vc5, vt2, vc4);
    __m512 vp3 = _mm512_fmadd_ps(vc5, vt3, vc4);
    __m512 vp4 = _mm512_fmadd_ps(vc5, vt4, vc4);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc3);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc3);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc3);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc3);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc3);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc2);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc2);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc2);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc2);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc2);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vc1);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vc1);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vc1);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vc1);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vc1);

    vp0 = _mm512_fmadd_ps(vp0, vt0, vone);
    vp1 = _mm512_fmadd_ps(vp1, vt1, vone);
    vp2 = _mm512_fmadd_ps(vp2, vt2, vone);
    vp3 = _mm512_fmadd_ps(vp3, vt3, vone);
    vp4 = _mm512_fmadd_ps(vp4, vt4, vone);

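    // Reconstruct exp(z) = p * 2**n with VSCALEFPS; the instruction applies
    // the scale directly, so n needs no clamping to the exponent range.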
    const __m512 ve0 = _mm512_scalef_ps(vp0, vn0);
    const __m512 ve1 = _mm512_scalef_ps(vp1, vn1);
    const __m512 ve2 = _mm512_scalef_ps(vp2, vn2);
    const __m512 ve3 = _mm512_scalef_ps(vp3, vn3);
    const __m512 ve4 = _mm512_scalef_ps(vp4, vn4);

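    // Denominator of the sigmoid: d = exp(z) + 1.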
    const __m512 vd0 = _mm512_add_ps(ve0, vone);
    const __m512 vd1 = _mm512_add_ps(ve1, vone);
    const __m512 vd2 = _mm512_add_ps(ve2, vone);
    const __m512 vd3 = _mm512_add_ps(ve3, vone);
    const __m512 vd4 = _mm512_add_ps(ve4, vone);

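    // f = exp(z) / (exp(z) + 1) = sigmoid(-|x|).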
    __m512 vf0 = _mm512_div_ps(ve0, vd0);
    __m512 vf1 = _mm512_div_ps(ve1, vd1);
    __m512 vf2 = _mm512_div_ps(ve2, vd2);
    __m512 vf3 = _mm512_div_ps(ve3, vd3);
    __m512 vf4 = _mm512_div_ps(ve4, vd4);

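    // Where the sign bit of x is clear (x >= 0), reflect the result:
    // sigmoid(x) = 1 - sigmoid(-x).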
    vf0 = _mm512_mask_sub_ps(vf0, _mm512_testn_epi32_mask(_mm512_castps_si512(vx0), vsign_mask), vone, vf0);
    vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone, vf1);
    vf2 = _mm512_mask_sub_ps(vf2, _mm512_testn_epi32_mask(_mm512_castps_si512(vx2), vsign_mask), vone, vf2);
    vf3 = _mm512_mask_sub_ps(vf3, _mm512_testn_epi32_mask(_mm512_castps_si512(vx3), vsign_mask), vone, vf3);
    vf4 = _mm512_mask_sub_ps(vf4, _mm512_testn_epi32_mask(_mm512_castps_si512(vx4), vsign_mask), vone, vf4);

    _mm512_storeu_ps(y, vf0);
    _mm512_storeu_ps(y + 16, vf1);
    _mm512_storeu_ps(y + 32, vf2);
    _mm512_storeu_ps(y + 48, vf3);
    _mm512_storeu_ps(y + 64, vf4);
    y += 80;
  }
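  // Process remaining full vectors of 16 floats with the same algorithm.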
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vone);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    __m512 vf = _mm512_div_ps(ve, vd);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_storeu_ps(y, vf);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));

    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vone);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    __m512 vf = _mm512_div_ps(ve, vd);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_mask_storeu_ps(y, vmask, vf);
  }
}