// Auto-generated file. Do not edit!
//   Template: src/f32-vrnd/vrndu-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

void xnn_f32_vrndu_ukernel__neon_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

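  // 0x4B000000 is 2**23 as a float. Any finite float with |x| >= 2**23 is
  // already integral, because the 24-bit significand has no fraction bits left.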
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
  const float32x4_t vone = vmovq_n_f32(1.0f);
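  // Main loop: round 4 elements per iteration toward +infinity. Without the
  // ARMv8 VRINTP instruction, rounding is emulated with a float->int->float
  // truncation followed by a conditional +1 adjustment.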
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;

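    // Convert to integer, truncating toward zero. The result is valid only
    // for lanes with |x| < 2**23; other lanes are masked out below.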
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);

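    // vcaltq sets all 32 bits in lanes where |x| < 2**23, i.e. where the
    // integer round trip produces a valid truncated value.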
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);

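    // Convert the truncated integer back to float.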
    const float32x4_t vprerndx0123 = vcvtq_f32_s32(vintx0123);

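    // Clear the sign bit of the mask so the select below always takes the
    // sign bit from x, preserving the sign of negative zero (e.g. for
    // x in (-1.0, -0.0], truncation yields +0.0 but the rounded result is -0.0).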
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));

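    // Bitwise select: in-range lanes take the truncated value combined with
    // the sign bit of x; out-of-range, infinite, and NaN lanes pass x through.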
    const float32x4_t vrndx0123 = vbslq_f32(vrndmask0123, vprerndx0123, vx0123);

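    // Lanes where the truncated value is already >= x need no adjustment;
    // truncation only rounds down for positive non-integral x.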
    uint32x4_t vadjmask0123 = vcgeq_f32(vrndx0123, vx0123);

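    // Candidate adjusted result: truncated value + 1.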
    const float32x4_t vadjrndx0123 = vaddq_f32(vrndx0123, vone);

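    // Force the sign bit of the mask so the final select always takes the
    // sign bit from the unadjusted value; of the lanes that reach the
    // adjusted path, only NaN lanes can have a mismatched sign bit.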
    vadjmask0123 = vorrq_u32(vadjmask0123, vmovq_n_u32(UINT32_C(0x80000000)));

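    // Select the final result: unadjusted value where rndx >= x, rndx + 1 elsewhere.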
    const float32x4_t vy0123 = vbslq_f32(vadjmask0123, vrndx0123, vadjrndx0123);

    vst1q_f32(y, vy0123); y += 4;
  }
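  // Tail: 1-3 elements remain. A full vector is loaded regardless (the kernel
  // is annotated XNN_OOB_READS), rounded exactly as in the main loop, and
  // stored piecewise.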
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    const float32x4_t vprerndx = vcvtq_f32_s32(vintx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vrndx = vbslq_f32(vrndmask, vprerndx, vx);
    uint32x4_t vadjmask = vcgeq_f32(vrndx, vx);
    const float32x4_t vadjrndx = vaddq_f32(vrndx, vone);
    vadjmask = vorrq_u32(vadjmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vadjmask, vrndx, vadjrndx);
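    // Store the low pair if at least 2 elements remain, then a single lane.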
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}