// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/avx512f-rr1-p5-scalef.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr1_p5.sign_mask);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p5.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p5.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p5.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p5.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p5.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr1_p5.c1);
  const __m512 vone = _mm512_set1_ps(params->avx512_rr1_p5.one);

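  // The kernel evaluates f = exp(z) / (1 + exp(z)) with z = -|x|, then flips the
  // result to 1 - f for non-negative inputs.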
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

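    // z = -|x|: force the sign bit so that exp(z) never overflows.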
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

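    // n = round(z * log2(e)), the integer power of two used for range reduction.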
    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);

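    // t = z - n * ln(2), the reduced argument in roughly [-ln(2)/2, ln(2)/2].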
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

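    // Degree-5 polynomial approximation of exp(t) on the reduced range, evaluated in Horner form.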
    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vone);

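    // e = p * 2**n ~= exp(z); d = e + 1 is the sigmoid denominator.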
    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

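    // r ~= 1/d: 14-bit reciprocal estimate refined by one Newton-Raphson step (the "nr1fma" variant).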
    __m512 vr = _mm512_rcp14_ps(vd);
    vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

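    // f = e / (e + 1) = sigmoid(-|x|).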
    __m512 vf = _mm512_mul_ps(ve, vr);

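    // For lanes with x >= 0 (sign bit clear), flip to sigmoid(x) = 1 - sigmoid(-x).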
    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_storeu_ps(y, vf);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));

    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

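    // Remainder path: masked load of the final 1-15 elements, followed by the same computation as the main loop.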
    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vz, vlog2e), 0);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc5, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_fmadd_ps(vp, vt, vc1);
    vp = _mm512_fmadd_ps(vp, vt, vone);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    __m512 vr = _mm512_rcp14_ps(vd);
    vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

    __m512 vf = _mm512_mul_ps(ve, vr);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_mask_storeu_ps(y, vmask, vf);
  }
}