// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/avx512f-rr2-lut32-p2-perm2-scalef.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_nr1fma_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

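  // Load the constants of the approximation from the parameters structure:
  // the sign-bit mask, the rounding "magic bias", log2(e), the 32-entry
  // 2^(i/32) lookup table split across two registers, the -ln(2) hi/lo pair
  // for Cody-Waite reduction, the polynomial coefficients, and 1.0f.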
  const __m512i vsign_mask = _mm512_set1_epi32((int) params->avx512_rr2_lut32_p2.sign_mask);
  const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr2_lut32_p2.magic_bias);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr2_lut32_p2.log2e);
  const __m512 vtable_lo = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_lo);
  const __m512 vtable_hi = _mm512_load_ps(params->avx512_rr2_lut32_p2.table_hi);
  const __m512 vminus_ln2_hi = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_hi);
  const __m512 vminus_ln2_lo = _mm512_set1_ps(params->avx512_rr2_lut32_p2.minus_ln2_lo);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c2);
  const __m512 vc1 = _mm512_set1_ps(params->avx512_rr2_lut32_p2.c1);
  const __m512 vone = _mm512_set1_ps(params->avx512_rr2_lut32_p2.one);

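  // Main loop: process full vectors of 16 single-precision elements.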
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m512 vx = _mm512_loadu_ps(x);
    x += 16;

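    // Compute z := -|x| by setting the sign bit, so that exp(z) <= 1 and the
    // exponential below cannot overflow. The result for non-negative inputs
    // is recovered at the end via sigmoid(x) = 1 - sigmoid(-x).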
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

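    // Compute n := z * log2(e), rounded to the nearest multiple of 1/32 by
    // adding the large "magic bias". While the bias is still added, the low
    // 5 bits of vn's bit pattern are the lookup-table index, so the table
    // value l = 2^(n - floor(n)) is gathered with a two-register permute;
    // subtracting the bias back out then recovers n.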
    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
    vn = _mm512_sub_ps(vn, vmagic_bias);

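    // Compute the reduced argument t := z - n * ln(2) using a two-step
    // Cody-Waite reduction (-ln(2) split into high and low parts).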
    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

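    // Approximate exp(t) with a degree-2 polynomial, pre-scaled by the table
    // value: p := l * (1 + t * (c1 + t * c2)).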
    __m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
    vt = _mm512_mul_ps(vt, vl);
    vp = _mm512_fmadd_ps(vt, vp, vl);

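    // Reconstruct the exponential e := 2^floor(n) * p ~= exp(z) with
    // VSCALEFPS, and form the denominator d := exp(z) + 1.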
    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

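    // Compute the reciprocal r := 1 / d with VRCP14PS and refine it with one
    // Newton-Raphson iteration: r := r + r * (1 - d * r).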
    __m512 vr = _mm512_rcp14_ps(vd);
    vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

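    // Compute f := exp(z) / (exp(z) + 1) = sigmoid(z) = sigmoid(-|x|).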
    __m512 vf = _mm512_mul_ps(ve, vr);

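    // For lanes where the input sign bit is not set (x >= 0), reconstruct the
    // final result as sigmoid(x) = 1 - sigmoid(-x).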
    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_storeu_ps(y, vf);
    y += 16;
  }
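  // Remainder: process the last 1-15 elements with a masked load and store;
  // the computation itself is identical to the main loop above.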
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));

    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);
    const __m512 vz = _mm512_castsi512_ps(_mm512_or_epi32(_mm512_castps_si512(vx), vsign_mask));

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m512 vl = _mm512_permutex2var_ps(vtable_lo, _mm512_castps_si512(vn), vtable_hi);
    vn = _mm512_sub_ps(vn, vmagic_bias);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vz);
    vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);

    __m512 vp = _mm512_fmadd_ps(vt, vc2, vc1);
    vt = _mm512_mul_ps(vt, vl);
    vp = _mm512_fmadd_ps(vt, vp, vl);

    const __m512 ve = _mm512_scalef_ps(vp, vn);
    const __m512 vd = _mm512_add_ps(ve, vone);

    __m512 vr = _mm512_rcp14_ps(vd);
    vr = _mm512_fmadd_ps(_mm512_fnmadd_ps(vr, vd, vone), vr, vr);

    __m512 vf = _mm512_mul_ps(ve, vr);

    vf = _mm512_mask_sub_ps(vf, _mm512_testn_epi32_mask(_mm512_castps_si512(vx), vsign_mask), vone, vf);

    _mm512_mask_storeu_ps(y, vmask, vf);
  }
}