// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/fma3-nr1fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


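// Computes y[i] = sqrt(x[i]) from the hardware reciprocal-square-root estimate,
// refined by one Newton-Raphson step plus one final residual adjustment (hence
// "nr1fma1adj" in the name); the main loop processes 64 floats per iteration ("x64").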
void xnn_f32_vsqrt_ukernel__fma3_nr1fma1adj_x64(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

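  // 0.5 is the only constant needed by the Newton-Raphson iteration below.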
  const __m256 vhalf = _mm256_load_ps(params->fma.half);
  for (; n >= 64 * sizeof(float); n -= 64 * sizeof(float)) {
    const __m256 vx0 = _mm256_loadu_ps(x);
    const __m256 vx1 = _mm256_loadu_ps(x + 8);
    const __m256 vx2 = _mm256_loadu_ps(x + 16);
    const __m256 vx3 = _mm256_loadu_ps(x + 24);
    const __m256 vx4 = _mm256_loadu_ps(x + 32);
    const __m256 vx5 = _mm256_loadu_ps(x + 40);
    const __m256 vx6 = _mm256_loadu_ps(x + 48);
    const __m256 vx7 = _mm256_loadu_ps(x + 56);
    x += 64;

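    // Initial ~12-bit-accurate estimates of 1/sqrt(x) from the VRSQRTPS instruction.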
    const __m256 vrsqrtx0 = _mm256_rsqrt_ps(vx0);
    const __m256 vrsqrtx1 = _mm256_rsqrt_ps(vx1);
    const __m256 vrsqrtx2 = _mm256_rsqrt_ps(vx2);
    const __m256 vrsqrtx3 = _mm256_rsqrt_ps(vx3);
    const __m256 vrsqrtx4 = _mm256_rsqrt_ps(vx4);
    const __m256 vrsqrtx5 = _mm256_rsqrt_ps(vx5);
    const __m256 vrsqrtx6 = _mm256_rsqrt_ps(vx6);
    const __m256 vrsqrtx7 = _mm256_rsqrt_ps(vx7);

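    // Form the working estimates: sqrtx ~ sqrt(x) and halfrsqrtx ~ 0.5/sqrt(x).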
    __m256 vsqrtx0 = _mm256_mul_ps(vrsqrtx0, vx0);
    __m256 vhalfrsqrtx0 = _mm256_mul_ps(vrsqrtx0, vhalf);
    __m256 vsqrtx1 = _mm256_mul_ps(vrsqrtx1, vx1);
    __m256 vhalfrsqrtx1 = _mm256_mul_ps(vrsqrtx1, vhalf);
    __m256 vsqrtx2 = _mm256_mul_ps(vrsqrtx2, vx2);
    __m256 vhalfrsqrtx2 = _mm256_mul_ps(vrsqrtx2, vhalf);
    __m256 vsqrtx3 = _mm256_mul_ps(vrsqrtx3, vx3);
    __m256 vhalfrsqrtx3 = _mm256_mul_ps(vrsqrtx3, vhalf);
    __m256 vsqrtx4 = _mm256_mul_ps(vrsqrtx4, vx4);
    __m256 vhalfrsqrtx4 = _mm256_mul_ps(vrsqrtx4, vhalf);
    __m256 vsqrtx5 = _mm256_mul_ps(vrsqrtx5, vx5);
    __m256 vhalfrsqrtx5 = _mm256_mul_ps(vrsqrtx5, vhalf);
    __m256 vsqrtx6 = _mm256_mul_ps(vrsqrtx6, vx6);
    __m256 vhalfrsqrtx6 = _mm256_mul_ps(vrsqrtx6, vhalf);
    __m256 vsqrtx7 = _mm256_mul_ps(vrsqrtx7, vx7);
    __m256 vhalfrsqrtx7 = _mm256_mul_ps(vrsqrtx7, vhalf);

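    // Newton-Raphson residual e = 0.5 - sqrtx * halfrsqrtx; zero when the estimate is exact.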
    const __m256 vresidual0 = _mm256_fnmadd_ps(vsqrtx0, vhalfrsqrtx0, vhalf);
    const __m256 vresidual1 = _mm256_fnmadd_ps(vsqrtx1, vhalfrsqrtx1, vhalf);
    const __m256 vresidual2 = _mm256_fnmadd_ps(vsqrtx2, vhalfrsqrtx2, vhalf);
    const __m256 vresidual3 = _mm256_fnmadd_ps(vsqrtx3, vhalfrsqrtx3, vhalf);
    const __m256 vresidual4 = _mm256_fnmadd_ps(vsqrtx4, vhalfrsqrtx4, vhalf);
    const __m256 vresidual5 = _mm256_fnmadd_ps(vsqrtx5, vhalfrsqrtx5, vhalf);
    const __m256 vresidual6 = _mm256_fnmadd_ps(vsqrtx6, vhalfrsqrtx6, vhalf);
    const __m256 vresidual7 = _mm256_fnmadd_ps(vsqrtx7, vhalfrsqrtx7, vhalf);

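    // One Newton-Raphson step: scale both estimates by (1 + e).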
    vhalfrsqrtx0 = _mm256_fmadd_ps(vhalfrsqrtx0, vresidual0, vhalfrsqrtx0);
    vsqrtx0 = _mm256_fmadd_ps(vsqrtx0, vresidual0, vsqrtx0);
    vhalfrsqrtx1 = _mm256_fmadd_ps(vhalfrsqrtx1, vresidual1, vhalfrsqrtx1);
    vsqrtx1 = _mm256_fmadd_ps(vsqrtx1, vresidual1, vsqrtx1);
    vhalfrsqrtx2 = _mm256_fmadd_ps(vhalfrsqrtx2, vresidual2, vhalfrsqrtx2);
    vsqrtx2 = _mm256_fmadd_ps(vsqrtx2, vresidual2, vsqrtx2);
    vhalfrsqrtx3 = _mm256_fmadd_ps(vhalfrsqrtx3, vresidual3, vhalfrsqrtx3);
    vsqrtx3 = _mm256_fmadd_ps(vsqrtx3, vresidual3, vsqrtx3);
    vhalfrsqrtx4 = _mm256_fmadd_ps(vhalfrsqrtx4, vresidual4, vhalfrsqrtx4);
    vsqrtx4 = _mm256_fmadd_ps(vsqrtx4, vresidual4, vsqrtx4);
    vhalfrsqrtx5 = _mm256_fmadd_ps(vhalfrsqrtx5, vresidual5, vhalfrsqrtx5);
    vsqrtx5 = _mm256_fmadd_ps(vsqrtx5, vresidual5, vsqrtx5);
    vhalfrsqrtx6 = _mm256_fmadd_ps(vhalfrsqrtx6, vresidual6, vhalfrsqrtx6);
    vsqrtx6 = _mm256_fmadd_ps(vsqrtx6, vresidual6, vsqrtx6);
    vhalfrsqrtx7 = _mm256_fmadd_ps(vhalfrsqrtx7, vresidual7, vhalfrsqrtx7);
    vsqrtx7 = _mm256_fmadd_ps(vsqrtx7, vresidual7, vsqrtx7);

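    // Residual of the refined estimate: adj = x - sqrtx * sqrtx.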
    const __m256 vadjustment0 = _mm256_fnmadd_ps(vsqrtx0, vsqrtx0, vx0);
    const __m256 vadjustment1 = _mm256_fnmadd_ps(vsqrtx1, vsqrtx1, vx1);
    const __m256 vadjustment2 = _mm256_fnmadd_ps(vsqrtx2, vsqrtx2, vx2);
    const __m256 vadjustment3 = _mm256_fnmadd_ps(vsqrtx3, vsqrtx3, vx3);
    const __m256 vadjustment4 = _mm256_fnmadd_ps(vsqrtx4, vsqrtx4, vx4);
    const __m256 vadjustment5 = _mm256_fnmadd_ps(vsqrtx5, vsqrtx5, vx5);
    const __m256 vadjustment6 = _mm256_fnmadd_ps(vsqrtx6, vsqrtx6, vx6);
    const __m256 vadjustment7 = _mm256_fnmadd_ps(vsqrtx7, vsqrtx7, vx7);

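    // First-order correction of the final result: y = sqrtx + (0.5/sqrt(x)) * adj.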
    const __m256 vy0 = _mm256_fmadd_ps(vhalfrsqrtx0, vadjustment0, vsqrtx0);
    const __m256 vy1 = _mm256_fmadd_ps(vhalfrsqrtx1, vadjustment1, vsqrtx1);
    const __m256 vy2 = _mm256_fmadd_ps(vhalfrsqrtx2, vadjustment2, vsqrtx2);
    const __m256 vy3 = _mm256_fmadd_ps(vhalfrsqrtx3, vadjustment3, vsqrtx3);
    const __m256 vy4 = _mm256_fmadd_ps(vhalfrsqrtx4, vadjustment4, vsqrtx4);
    const __m256 vy5 = _mm256_fmadd_ps(vhalfrsqrtx5, vadjustment5, vsqrtx5);
    const __m256 vy6 = _mm256_fmadd_ps(vhalfrsqrtx6, vadjustment6, vsqrtx6);
    const __m256 vy7 = _mm256_fmadd_ps(vhalfrsqrtx7, vadjustment7, vsqrtx7);

    _mm256_storeu_ps(y, vy0);
    _mm256_storeu_ps(y + 8, vy1);
    _mm256_storeu_ps(y + 16, vy2);
    _mm256_storeu_ps(y + 24, vy3);
    _mm256_storeu_ps(y + 32, vy4);
    _mm256_storeu_ps(y + 40, vy5);
    _mm256_storeu_ps(y + 48, vy6);
    _mm256_storeu_ps(y + 56, vy7);
    y += 64;
  }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

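    // Same rsqrt + one Newton-Raphson step + adjustment sequence as the main loop,
    // applied to a single vector of 8 elements.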
    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
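    // Load a mask whose first n / sizeof(float) lanes are set, so the masked
    // load below touches only the 1-7 remaining elements.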
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->fma.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

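    // Store the remaining 1-7 elements: 4, then 2, then 1 at a time.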
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}