// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/fma3-nr1fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsqrt_ukernel__fma3_nr1fma1adj_x48(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

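  // Compute sqrt(x) as x * rsqrt(x): start from the hardware reciprocal
  // square-root estimate, refine it with one Newton-Raphson step, and finish
  // with one FMA-based adjustment step (the "nr1fma1adj" in the name).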
  const __m256 vhalf = _mm256_load_ps(params->fma.half);
  for (; n >= 48 * sizeof(float); n -= 48 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    const __m256 vx4 = _mm256_loadu_ps(x + 32);
    const __m256 vx5 = _mm256_loadu_ps(x + 40);
    x += 48;

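    // Initial reciprocal square-root estimates: VRSQRTPS is accurate to
    // roughly 12 bits.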
    const __m256 vrsqrtx0 = _mm256_rsqrt_ps(vx0);
    const __m256 vrsqrtx1 = _mm256_rsqrt_ps(vx1);
    const __m256 vrsqrtx2 = _mm256_rsqrt_ps(vx2);
    const __m256 vrsqrtx3 = _mm256_rsqrt_ps(vx3);
    const __m256 vrsqrtx4 = _mm256_rsqrt_ps(vx4);
    const __m256 vrsqrtx5 = _mm256_rsqrt_ps(vx5);

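    // vsqrtx ~= sqrt(x) = x * rsqrt(x); vhalfrsqrtx = 0.5 * rsqrt(x)
    // approximates the derivative of sqrt at x, i.e. 1 / (2 * sqrt(x)).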
    __m256 vsqrtx0 = _mm256_mul_ps(vrsqrtx0, vx0);
    __m256 vhalfrsqrtx0 = _mm256_mul_ps(vrsqrtx0, vhalf);
    __m256 vsqrtx1 = _mm256_mul_ps(vrsqrtx1, vx1);
    __m256 vhalfrsqrtx1 = _mm256_mul_ps(vrsqrtx1, vhalf);
    __m256 vsqrtx2 = _mm256_mul_ps(vrsqrtx2, vx2);
    __m256 vhalfrsqrtx2 = _mm256_mul_ps(vrsqrtx2, vhalf);
    __m256 vsqrtx3 = _mm256_mul_ps(vrsqrtx3, vx3);
    __m256 vhalfrsqrtx3 = _mm256_mul_ps(vrsqrtx3, vhalf);
    __m256 vsqrtx4 = _mm256_mul_ps(vrsqrtx4, vx4);
    __m256 vhalfrsqrtx4 = _mm256_mul_ps(vrsqrtx4, vhalf);
    __m256 vsqrtx5 = _mm256_mul_ps(vrsqrtx5, vx5);
    __m256 vhalfrsqrtx5 = _mm256_mul_ps(vrsqrtx5, vhalf);

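    // Newton-Raphson residual 0.5 - vsqrtx * vhalfrsqrtx = 0.5 * (1 - x * rsqrtx^2),
    // which vanishes when the estimate is exact.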
    const __m256 vresidual0 = _mm256_fnmadd_ps(vsqrtx0, vhalfrsqrtx0, vhalf);
    const __m256 vresidual1 = _mm256_fnmadd_ps(vsqrtx1, vhalfrsqrtx1, vhalf);
    const __m256 vresidual2 = _mm256_fnmadd_ps(vsqrtx2, vhalfrsqrtx2, vhalf);
    const __m256 vresidual3 = _mm256_fnmadd_ps(vsqrtx3, vhalfrsqrtx3, vhalf);
    const __m256 vresidual4 = _mm256_fnmadd_ps(vsqrtx4, vhalfrsqrtx4, vhalf);
    const __m256 vresidual5 = _mm256_fnmadd_ps(vsqrtx5, vhalfrsqrtx5, vhalf);

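    // One Newton-Raphson step: scale both estimates by (1 + residual),
    // roughly doubling the number of correct bits.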
    vhalfrsqrtx0 = _mm256_fmadd_ps(vhalfrsqrtx0, vresidual0, vhalfrsqrtx0);
    vsqrtx0 = _mm256_fmadd_ps(vsqrtx0, vresidual0, vsqrtx0);
    vhalfrsqrtx1 = _mm256_fmadd_ps(vhalfrsqrtx1, vresidual1, vhalfrsqrtx1);
    vsqrtx1 = _mm256_fmadd_ps(vsqrtx1, vresidual1, vsqrtx1);
    vhalfrsqrtx2 = _mm256_fmadd_ps(vhalfrsqrtx2, vresidual2, vhalfrsqrtx2);
    vsqrtx2 = _mm256_fmadd_ps(vsqrtx2, vresidual2, vsqrtx2);
    vhalfrsqrtx3 = _mm256_fmadd_ps(vhalfrsqrtx3, vresidual3, vhalfrsqrtx3);
    vsqrtx3 = _mm256_fmadd_ps(vsqrtx3, vresidual3, vsqrtx3);
    vhalfrsqrtx4 = _mm256_fmadd_ps(vhalfrsqrtx4, vresidual4, vhalfrsqrtx4);
    vsqrtx4 = _mm256_fmadd_ps(vsqrtx4, vresidual4, vsqrtx4);
    vhalfrsqrtx5 = _mm256_fmadd_ps(vhalfrsqrtx5, vresidual5, vhalfrsqrtx5);
    vsqrtx5 = _mm256_fmadd_ps(vsqrtx5, vresidual5, vsqrtx5);

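    // Remaining error x - vsqrtx^2, evaluated in a single fused operation to
    // avoid rounding the intermediate square.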
    const __m256 vadjustment0 = _mm256_fnmadd_ps(vsqrtx0, vsqrtx0, vx0);
    const __m256 vadjustment1 = _mm256_fnmadd_ps(vsqrtx1, vsqrtx1, vx1);
    const __m256 vadjustment2 = _mm256_fnmadd_ps(vsqrtx2, vsqrtx2, vx2);
    const __m256 vadjustment3 = _mm256_fnmadd_ps(vsqrtx3, vsqrtx3, vx3);
    const __m256 vadjustment4 = _mm256_fnmadd_ps(vsqrtx4, vsqrtx4, vx4);
    const __m256 vadjustment5 = _mm256_fnmadd_ps(vsqrtx5, vsqrtx5, vx5);

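    // First-order correction: y = vsqrtx + vhalfrsqrtx * (x - vsqrtx^2),
    // pushing the result toward the correctly rounded square root.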
    const __m256 vy0 = _mm256_fmadd_ps(vhalfrsqrtx0, vadjustment0, vsqrtx0);
    const __m256 vy1 = _mm256_fmadd_ps(vhalfrsqrtx1, vadjustment1, vsqrtx1);
    const __m256 vy2 = _mm256_fmadd_ps(vhalfrsqrtx2, vadjustment2, vsqrtx2);
    const __m256 vy3 = _mm256_fmadd_ps(vhalfrsqrtx3, vadjustment3, vsqrtx3);
    const __m256 vy4 = _mm256_fmadd_ps(vhalfrsqrtx4, vadjustment4, vsqrtx4);
    const __m256 vy5 = _mm256_fmadd_ps(vhalfrsqrtx5, vadjustment5, vsqrtx5);

    _mm256_storeu_ps(y, vy0);
    _mm256_storeu_ps(y + 8, vy1);
    _mm256_storeu_ps(y + 16, vy2);
    _mm256_storeu_ps(y + 24, vy3);
    _mm256_storeu_ps(y + 32, vy4);
    _mm256_storeu_ps(y + 40, vy5);
    y += 48;
  }
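  // Process remaining full 8-element vectors with the same
  // estimate/refine/adjust sequence as the main loop.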
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
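  // Handle the final 1-7 elements with a masked load and partial stores.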
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
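    // Select a mask with one all-ones lane per remaining element from the
    // precomputed mask table.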
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->fma.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

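    // Write out the valid lanes: 4, then 2, then 1, according to the set
    // bits of the remaining byte count.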
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}