// Auto-generated file. Do not edit!
//   Template: src/f32-vsqrt/neonfma-nr2fma1adj.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsqrt_ukernel__neonfma_nr2fma1adj_x12(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

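  // Compute sqrt(x) as x * rsqrt(x): start from a coarse VRSQRTE estimate of
  // 1/sqrt(x), refine it with two FMA-based Newton-Raphson iterations carried
  // out jointly on sqrt(x) and 0.5/sqrt(x), and finish with one FMA-based
  // adjustment step (the "nr2fma1adj" in the kernel name).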
  const float32x4_t vhalf = vmovq_n_f32(0.5f);
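  // Main loop: process 12 elements (three float32x4_t vectors) per iteration.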
  for (; n >= 12 * sizeof(float); n -= 12 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;
    const float32x4_t vx89AB = vld1q_f32(x); x += 4;

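    // Coarse initial estimates of 1/sqrt(x) from the VRSQRTE instruction.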
    const float32x4_t vrsqrtx0123 = vrsqrteq_f32(vx0123);
    const float32x4_t vrsqrtx4567 = vrsqrteq_f32(vx4567);
    const float32x4_t vrsqrtx89AB = vrsqrteq_f32(vx89AB);

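    // Initial iterates: sqrtx ~= sqrt(x) = x * rsqrt(x) and
    // halfrsqrtx ~= 0.5 / sqrt(x) = 0.5 * rsqrt(x).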
    float32x4_t vsqrtx0123 = vmulq_f32(vrsqrtx0123, vx0123);
    float32x4_t vhalfrsqrtx0123 = vmulq_f32(vrsqrtx0123, vhalf);
    float32x4_t vsqrtx4567 = vmulq_f32(vrsqrtx4567, vx4567);
    float32x4_t vhalfrsqrtx4567 = vmulq_f32(vrsqrtx4567, vhalf);
    float32x4_t vsqrtx89AB = vmulq_f32(vrsqrtx89AB, vx89AB);
    float32x4_t vhalfrsqrtx89AB = vmulq_f32(vrsqrtx89AB, vhalf);

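    // First Newton-Raphson residual, 0.5 - sqrtx * halfrsqrtx (zero when the
    // iterates are exact), computed with a fused multiply-subtract.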
    float32x4_t vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    float32x4_t vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    float32x4_t vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);

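    // First Newton-Raphson update of both iterates: v <- v + residual * v.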
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);

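    // Second Newton-Raphson residual.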
    vresidual0123 = vfmsq_f32(vhalf, vsqrtx0123, vhalfrsqrtx0123);
    vresidual4567 = vfmsq_f32(vhalf, vsqrtx4567, vhalfrsqrtx4567);
    vresidual89AB = vfmsq_f32(vhalf, vsqrtx89AB, vhalfrsqrtx89AB);

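    // Second Newton-Raphson update of both iterates.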
    vhalfrsqrtx0123 = vfmaq_f32(vhalfrsqrtx0123, vresidual0123, vhalfrsqrtx0123);
    vsqrtx0123 = vfmaq_f32(vsqrtx0123, vresidual0123, vsqrtx0123);
    vhalfrsqrtx4567 = vfmaq_f32(vhalfrsqrtx4567, vresidual4567, vhalfrsqrtx4567);
    vsqrtx4567 = vfmaq_f32(vsqrtx4567, vresidual4567, vsqrtx4567);
    vhalfrsqrtx89AB = vfmaq_f32(vhalfrsqrtx89AB, vresidual89AB, vhalfrsqrtx89AB);
    vsqrtx89AB = vfmaq_f32(vsqrtx89AB, vresidual89AB, vsqrtx89AB);

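    // Final Heron-style adjustment step:
    // y = sqrtx + halfrsqrtx * (x - sqrtx * sqrtx).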
    const float32x4_t vadjustment0123 = vfmsq_f32(vx0123, vsqrtx0123, vsqrtx0123);
    const float32x4_t vadjustment4567 = vfmsq_f32(vx4567, vsqrtx4567, vsqrtx4567);
    const float32x4_t vadjustment89AB = vfmsq_f32(vx89AB, vsqrtx89AB, vsqrtx89AB);

    const float32x4_t vy0123 = vfmaq_f32(vsqrtx0123, vhalfrsqrtx0123, vadjustment0123);
    const float32x4_t vy4567 = vfmaq_f32(vsqrtx4567, vhalfrsqrtx4567, vadjustment4567);
    const float32x4_t vy89AB = vfmaq_f32(vsqrtx89AB, vhalfrsqrtx89AB, vadjustment89AB);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
  }
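  // Remainder loop: same algorithm, one vector (4 elements) at a time.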
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;
    const float32x4_t vrsqrtx = vrsqrteq_f32(vx);
    float32x4_t vsqrtx = vmulq_f32(vrsqrtx, vx);
    float32x4_t vhalfrsqrtx = vmulq_f32(vrsqrtx, vhalf);
    float32x4_t vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    const float32x4_t vadjustment = vfmsq_f32(vx, vsqrtx, vsqrtx);
    const float32x4_t vy = vfmaq_f32(vsqrtx, vhalfrsqrtx, vadjustment);
    vst1q_f32(y, vy); y += 4;
  }
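  // Tail: 1 to 3 elements remain. A full vector is loaded (the kernel is
  // annotated XNN_OOB_READS, so reading up to 3 floats past the end of x is
  // permitted) and only the valid lanes are stored.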
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);
    const float32x4_t vrsqrtx = vrsqrteq_f32(vx);
    float32x4_t vsqrtx = vmulq_f32(vrsqrtx, vx);
    float32x4_t vhalfrsqrtx = vmulq_f32(vrsqrtx, vhalf);
    float32x4_t vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    vresidual = vfmsq_f32(vhalf, vsqrtx, vhalfrsqrtx);
    vhalfrsqrtx = vfmaq_f32(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = vfmaq_f32(vsqrtx, vresidual, vsqrtx);
    const float32x4_t vadjustment = vfmsq_f32(vx, vsqrtx, vsqrtx);
    const float32x4_t vy = vfmaq_f32(vsqrtx, vhalfrsqrtx, vadjustment);

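    // Store two lanes, then one lane, depending on the remaining count.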
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}