// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qu8_f32_vcvt_ukernel__neon_x32(
    size_t n,
    const uint8_t* x,
    float* y,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

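  // Broadcast the negated zero point into every 16-bit lane (the params field
  // holds two duplicated int16 copies, loaded here as one 32-bit value) and
  // the scale into every 32-bit float lane.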
  const int16x8_t vminus_zero_point = vreinterpretq_s16_u32(vld1q_dup_u32((const void*) params->neon.minus_zero_point));
  const float32x4_t vscale = vld1q_dup_f32(&params->neon.scale);
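  // Main loop: convert 32 uint8 elements per iteration.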
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    const uint8x8_t vx01234567 = vld1_u8(x); x += 8;
    const uint8x8_t vx89ABCDEF = vld1_u8(x); x += 8;
    const uint8x8_t vxGHIJKLMN = vld1_u8(x); x += 8;
    const uint8x8_t vxOPQRSTUV = vld1_u8(x); x += 8;

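    // Widen to 16 bits and subtract the zero point in one step: vaddw_u8
    // widens each uint8 element and adds it to the preloaded negative zero point.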
    const int16x8_t vhx01234567 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx01234567));
    const int16x8_t vhx89ABCDEF = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx89ABCDEF));
    const int16x8_t vhxGHIJKLMN = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vxGHIJKLMN));
    const int16x8_t vhxOPQRSTUV = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vxOPQRSTUV));

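    // Sign-extend the 16-bit differences to 32 bits.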
    const int32x4_t vwx0123 = vmovl_s16(vget_low_s16(vhx01234567));
    const int32x4_t vwx4567 = vmovl_s16(vget_high_s16(vhx01234567));
    const int32x4_t vwx89AB = vmovl_s16(vget_low_s16(vhx89ABCDEF));
    const int32x4_t vwxCDEF = vmovl_s16(vget_high_s16(vhx89ABCDEF));
    const int32x4_t vwxGHIJ = vmovl_s16(vget_low_s16(vhxGHIJKLMN));
    const int32x4_t vwxKLMN = vmovl_s16(vget_high_s16(vhxGHIJKLMN));
    const int32x4_t vwxOPQR = vmovl_s16(vget_low_s16(vhxOPQRSTUV));
    const int32x4_t vwxSTUV = vmovl_s16(vget_high_s16(vhxOPQRSTUV));

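    // Convert to single-precision floating point.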
    float32x4_t vy0123 = vcvtq_f32_s32(vwx0123);
    float32x4_t vy4567 = vcvtq_f32_s32(vwx4567);
    float32x4_t vy89AB = vcvtq_f32_s32(vwx89AB);
    float32x4_t vyCDEF = vcvtq_f32_s32(vwxCDEF);
    float32x4_t vyGHIJ = vcvtq_f32_s32(vwxGHIJ);
    float32x4_t vyKLMN = vcvtq_f32_s32(vwxKLMN);
    float32x4_t vyOPQR = vcvtq_f32_s32(vwxOPQR);
    float32x4_t vySTUV = vcvtq_f32_s32(vwxSTUV);

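    // Apply the dequantization scale.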
    vy0123 = vmulq_f32(vy0123, vscale);
    vy4567 = vmulq_f32(vy4567, vscale);
    vy89AB = vmulq_f32(vy89AB, vscale);
    vyCDEF = vmulq_f32(vyCDEF, vscale);
    vyGHIJ = vmulq_f32(vyGHIJ, vscale);
    vyKLMN = vmulq_f32(vyKLMN, vscale);
    vyOPQR = vmulq_f32(vyOPQR, vscale);
    vySTUV = vmulq_f32(vySTUV, vscale);

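    // Store all 32 converted elements.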
    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
    vst1q_f32(y, vy89AB); y += 4;
    vst1q_f32(y, vyCDEF); y += 4;
    vst1q_f32(y, vyGHIJ); y += 4;
    vst1q_f32(y, vyKLMN); y += 4;
    vst1q_f32(y, vyOPQR); y += 4;
    vst1q_f32(y, vySTUV); y += 4;
  }
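  // Secondary loop: convert 8 elements at a time.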
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    const uint8x8_t vx = vld1_u8(x); x += 8;

    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));

    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));

    float32x4_t vy_lo = vcvtq_f32_s32(vwx_lo);
    float32x4_t vy_hi = vcvtq_f32_s32(vwx_hi);

    vy_lo = vmulq_f32(vy_lo, vscale);
    vy_hi = vmulq_f32(vy_hi, vscale);

    vst1q_f32(y, vy_lo); y += 4;
    vst1q_f32(y, vy_hi); y += 4;
  }
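  // Tail: 1-7 elements remain. Load a full 8 bytes (XNN_OOB_READS marks the
  // kernel as allowed to read past the end of the input), convert, then store
  // 4, 2, and 1 elements according to the bits of n.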
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    const uint8x8_t vx = vld1_u8(x);

    const int16x8_t vhx = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(vminus_zero_point), vx));

    const int32x4_t vwx_lo = vmovl_s16(vget_low_s16(vhx));
    const int32x4_t vwx_hi = vmovl_s16(vget_high_s16(vhx));

    float32x4_t vy = vcvtq_f32_s32(vwx_lo);
    vy = vmulq_f32(vy, vscale);

    if (n & (4 * sizeof(uint8_t))) {
      vst1q_f32(y, vy); y += 4;
      vy = vcvtq_f32_s32(vwx_hi);
      vy = vmulq_f32(vy, vscale);
    }
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(uint8_t))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}