// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/neonfma-nr2fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

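// This kernel computes square roots elementwise: a VRSQRTE initial estimate
// of 1/sqrt(x) is refined with two coupled Newton-Raphson iterations using
// FMA ("nr2fma"), followed by one final FMA-based adjustment step ("1adj").
// The main loop processes 24 floats per iteration ("x24").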
void xnn_f32_vsqrt_ukernel__neonfma_nr2fma1adj_x24(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const float32x4_t vhalf = vmovq_n_f32(0.5f);
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
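    // Load 24 inputs as six vectors of 4 floats.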
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;
    const float32x4_t vxCDEF = vld1q_f32(x); x += 4;
    const float32x4_t vxGHIJ = vld1q_f32(x); x += 4;
    const float32x4_t vxKLMN = vld1q_f32(x); x += 4;

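    // Initial estimates of 1/sqrt(x); VRSQRTE provides roughly 8 bits of
    // precision.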
    const float32x4_t vrsqrtx0123 = vrsqrteq_f32(vx0123);
    const float32x4_t vrsqrtx4567 = vrsqrteq_f32(vx4567);
    const float32x4_t vrsqrtx89AB = vrsqrteq_f32(vx89AB);
    const float32x4_t vrsqrtxCDEF = vrsqrteq_f32(vxCDEF);
    const float32x4_t vrsqrtxGHIJ = vrsqrteq_f32(vxGHIJ);
    const float32x4_t vrsqrtxKLMN = vrsqrteq_f32(vxKLMN);

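    // Form coupled estimates: vsqrtx ~ sqrt(x) and vhalfrsqrtx ~ 0.5/sqrt(x).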
    float32x4_t vsqrtx0123 = vmulq_f32(vrsqrtx0123, vx0123);
    float32x4_t vhalfrsqrtx0123 = vmulq_f32(vrsqrtx0123, vhalf);
    float32x4_t vsqrtx4567 = vmulq_f32(vrsqrtx4567, vx4567);
    float32x4_t vhalfrsqrtx4567 = vmulq_f32(vrsqrtx4567, vhalf);
    float32x4_t vsqrtx89AB = vmulq_f32(vrsqrtx89AB, vx89AB);
    float32x4_t vhalfrsqrtx89AB = vmulq_f32(vrsqrtx89AB, vhalf);
    float32x4_t vsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vxCDEF);
    float32x4_t vhalfrsqrtxCDEF = vmulq_f32(vrsqrtxCDEF, vhalf);
    float32x4_t vsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vxGHIJ);
    float32x4_t vhalfrsqrtxGHIJ = vmulq_f32(vrsqrtxGHIJ, vhalf);
    float32x4_t vsqrtxKLMN = vmulq_f32(vrsqrtxKLMN, vxKLMN);
    float32x4_t vhalfrsqrtxKLMN = vmulq_f32(vrsqrtxKLMN, vhalf);

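    // First Newton-Raphson step: the residual 0.5 - sqrtx * halfrsqrtx
    // vanishes when both estimates are exact.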
    float32x4_t vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    float32x4_t vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    float32x4_t vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);
    float32x4_t vresidualCDEF = vfmsq_f32(vhalf, vsqrtxCDEF, vhalfrsqrtxCDEF);
    float32x4_t vresidualGHIJ = vfmsq_f32(vhalf, vsqrtxGHIJ, vhalfrsqrtxGHIJ);
    float32x4_t vresidualKLMN = vfmsq_f32(vhalf, vsqrtxKLMN, vhalfrsqrtxKLMN);

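    // Update both estimates, est += residual * est, roughly doubling their
    // precision.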
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);
    vhalfrsqrtxCDEF = vfmaq_f32(vhalfrsqrtxCDEF, vresidualCDEF, vhalfrsqrtxCDEF);
    vsqrtxCDEF = vfmaq_f32(vsqrtxCDEF, vresidualCDEF, vsqrtxCDEF);
    vhalfrsqrtxGHIJ = vfmaq_f32(vhalfrsqrtxGHIJ, vresidualGHIJ, vhalfrsqrtxGHIJ);
    vsqrtxGHIJ = vfmaq_f32(vsqrtxGHIJ, vresidualGHIJ, vsqrtxGHIJ);
    vhalfrsqrtxKLMN = vfmaq_f32(vhalfrsqrtxKLMN, vresidualKLMN, vhalfrsqrtxKLMN);
    vsqrtxKLMN = vfmaq_f32(vsqrtxKLMN, vresidualKLMN, vsqrtxKLMN);

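    // Second Newton-Raphson step: recompute the residual from the refined
    // estimates.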
    vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);
    vresidualCDEF = vfmsq_f32(vhalf, vsqrtxCDEF, vhalfrsqrtxCDEF);
    vresidualGHIJ = vfmsq_f32(vhalf, vsqrtxGHIJ, vhalfrsqrtxGHIJ);
    vresidualKLMN = vfmsq_f32(vhalf, vsqrtxKLMN, vhalfrsqrtxKLMN);

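    // Update both estimates again; two steps lift the initial ~8-bit estimate
    // past the 24-bit float mantissa.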
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);
    vhalfrsqrtxCDEF = vfmaq_f32(vhalfrsqrtxCDEF, vresidualCDEF, vhalfrsqrtxCDEF);
    vsqrtxCDEF = vfmaq_f32(vsqrtxCDEF, vresidualCDEF, vsqrtxCDEF);
    vhalfrsqrtxGHIJ = vfmaq_f32(vhalfrsqrtxGHIJ, vresidualGHIJ, vhalfrsqrtxGHIJ);
    vsqrtxGHIJ = vfmaq_f32(vsqrtxGHIJ, vresidualGHIJ, vsqrtxGHIJ);
    vhalfrsqrtxKLMN = vfmaq_f32(vhalfrsqrtxKLMN, vresidualKLMN, vhalfrsqrtxKLMN);
    vsqrtxKLMN = vfmaq_f32(vsqrtxKLMN, vresidualKLMN, vsqrtxKLMN);

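    // Final adjustment: x - sqrtx * sqrtx is the remaining error, computed
    // with a single rounding thanks to FMA.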
    const float32x4_t vadjustment0123 = vfmsq_f32(vx0123, vsqrtx0123, vsqrtx0123);
    const float32x4_t vadjustment4567 = vfmsq_f32(vx4567, vsqrtx4567, vsqrtx4567);
    const float32x4_t vadjustment89AB = vfmsq_f32(vx89AB, vsqrtx89AB, vsqrtx89AB);
    const float32x4_t vadjustmentCDEF = vfmsq_f32(vxCDEF, vsqrtxCDEF, vsqrtxCDEF);
    const float32x4_t vadjustmentGHIJ = vfmsq_f32(vxGHIJ, vsqrtxGHIJ, vsqrtxGHIJ);
    const float32x4_t vadjustmentKLMN = vfmsq_f32(vxKLMN, vsqrtxKLMN, vsqrtxKLMN);

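    // y = sqrtx + halfrsqrtx * adjustment, i.e. sqrt(x) ~ s + (x - s^2) / (2*s),
    // correcting the last bits of the result.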
    const float32x4_t vy0123 = vfmaq_f32(vsqrtx0123, vhalfrsqrtx0123, vadjustment0123);
    const float32x4_t vy4567 = vfmaq_f32(vsqrtx4567, vhalfrsqrtx4567, vadjustment4567);
    const float32x4_t vy89AB = vfmaq_f32(vsqrtx89AB, vhalfrsqrtx89AB, vadjustment89AB);
    const float32x4_t vyCDEF = vfmaq_f32(vsqrtxCDEF, vhalfrsqrtxCDEF, vadjustmentCDEF);
    const float32x4_t vyGHIJ = vfmaq_f32(vsqrtxGHIJ, vhalfrsqrtxGHIJ, vadjustmentGHIJ);
    const float32x4_t vyKLMN = vfmaq_f32(vsqrtxKLMN, vhalfrsqrtxKLMN, vadjustmentKLMN);

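    // Store 24 outputs.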
    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
    vst1q_f32(y, vyGHIJ); y += 4;
    vst1q_f32(y, vyKLMN); y += 4;
  }
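  // Process remaining full vectors of 4 floats with the same
  // estimate-refine-adjust sequence.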
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;
    const float32x4_t vrsqrtx = vrsqrteq_f32(vx);
    float32x4_t vsqrtx = vmulq_f32(vrsqrtx, vx);
    float32x4_t vhalfrsqrtx = vmulq_f32(vrsqrtx, vhalf);
    float32x4_t vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    const float32x4_t vadjustment = vfmsq_f32(vx, vsqrtx, vsqrtx);
    const float32x4_t vy = vfmaq_f32(vsqrtx, vhalfrsqrtx, vadjustment);
    vst1q_f32(y, vy); y += 4;
  }
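  // Tail of 1-3 floats: compute a full vector (the load may read past the
  // end of x, which the XNN_OOB_READS annotation declares acceptable) and
  // store only the valid lanes.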
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);
    const float32x4_t vrsqrtx = vrsqrteq_f32(vx);
    float32x4_t vsqrtx = vmulq_f32(vrsqrtx, vx);
    float32x4_t vhalfrsqrtx = vmulq_f32(vrsqrtx, vhalf);
    float32x4_t vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    const float32x4_t vadjustment = vfmsq_f32(vx, vsqrtx, vsqrtx);
    const float32x4_t vy = vfmaq_f32(vsqrtx, vhalfrsqrtx, vadjustment);

    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}