// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/neon-int32.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_f16_f32_vcvt_ukernel__neon_int32_x24(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

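  // Constants for converting FP16 bit patterns to FP32 with integer arithmetic:
  //  - vsign_mask isolates the sign bit once the half bits occupy the upper 16 bits of a lane.
  //  - vexp_offset and vexp_scale together rebase the exponent: the offset adds 224 to the
  //    FP32 exponent field, and the scale (which must be 0x1.0p-112f for the math to work out)
  //    cancels the excess, netting the +112 bias change from FP16 (bias 15) to FP32 (bias 127).
  //  - vmagic_bias is the bit pattern of 0.5f, used to convert denormal inputs via float subtraction.
  //  - vdenorm_cutoff separates normalized inputs from denormal and zero inputs.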
  const uint32x4_t vsign_mask = vmovq_n_u32(0x80000000);
  const uint32x4_t vexp_offset = vmovq_n_u32(0x70000000);
  const float32x4_t vexp_scale = vld1q_dup_f32(&params->neon.exp_scale);
  const uint32x4_t vmagic_bias = vmovq_n_u32(0x3F000000);
  const uint32x4_t vdenorm_cutoff = vmovq_n_u32(0x04000000);

  const uint16_t* i = (const uint16_t*) input;
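  // Main loop: convert 24 elements (three 8-element vectors) per iteration.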
  for (; n >= 24 * sizeof(uint16_t); n -= 24 * sizeof(uint16_t)) {
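    // Load 24 FP16 values as raw 16-bit words.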
    const uint16x8_t vh0 = vld1q_u16(i); i += 8;
    const uint16x8_t vh1 = vld1q_u16(i); i += 8;
    const uint16x8_t vh2 = vld1q_u16(i); i += 8;

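    // Widen each half to 32 bits, placing its bits in the upper half of the lane.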
    const uint32x4_t vw0 = vshll_n_u16(vget_low_u16(vh0), 16);
    const uint32x4_t vw1 = vshll_n_u16(vget_high_u16(vh0), 16);
    const uint32x4_t vw2 = vshll_n_u16(vget_low_u16(vh1), 16);
    const uint32x4_t vw3 = vshll_n_u16(vget_high_u16(vh1), 16);
    const uint32x4_t vw4 = vshll_n_u16(vget_low_u16(vh2), 16);
    const uint32x4_t vw5 = vshll_n_u16(vget_high_u16(vh2), 16);

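    // Isolate the sign bit of each element.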
    const uint32x4_t vsign0 = vandq_u32(vw0, vsign_mask);
    const uint32x4_t vsign1 = vandq_u32(vw1, vsign_mask);
    const uint32x4_t vsign2 = vandq_u32(vw2, vsign_mask);
    const uint32x4_t vsign3 = vandq_u32(vw3, vsign_mask);
    const uint32x4_t vsign4 = vandq_u32(vw4, vsign_mask);
    const uint32x4_t vsign5 = vandq_u32(vw5, vsign_mask);

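    // Clear the sign bit, leaving only the exponent and mantissa.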
    const uint32x4_t vnonsign0 = veorq_u32(vw0, vsign0);
    const uint32x4_t vnonsign1 = veorq_u32(vw1, vsign1);
    const uint32x4_t vnonsign2 = veorq_u32(vw2, vsign2);
    const uint32x4_t vnonsign3 = veorq_u32(vw3, vsign3);
    const uint32x4_t vnonsign4 = veorq_u32(vw4, vsign4);
    const uint32x4_t vnonsign5 = veorq_u32(vw5, vsign5);

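    // Normalized path: shift the exponent/mantissa right by 3 into FP32 position while
    // accumulating onto the exponent offset, then multiply by vexp_scale to correct the bias.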
    const float32x4_t vnorm0 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign0, 3)), vexp_scale);
    const float32x4_t vnorm1 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign1, 3)), vexp_scale);
    const float32x4_t vnorm2 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign2, 3)), vexp_scale);
    const float32x4_t vnorm3 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign3, 3)), vexp_scale);
    const float32x4_t vnorm4 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign4, 3)), vexp_scale);
    const float32x4_t vnorm5 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign5, 3)), vexp_scale);

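    // Denormal path: insert the exponent/mantissa bits into the low half of the bit pattern
    // of 0.5f, then subtract 0.5f so only the (tiny) denormal contribution remains.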
    const float32x4_t vdenorm0 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign0, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm1 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign1, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm2 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign2, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm3 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign3, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm4 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign4, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm5 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign5, 16)), vreinterpretq_f32_u32(vmagic_bias));

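    // vxmask selects the normalized path for elements above the denormal cutoff.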
    const uint32x4_t vxmask0 = vcgtq_u32(vnonsign0, vdenorm_cutoff);
    const uint32x4_t vxmask1 = vcgtq_u32(vnonsign1, vdenorm_cutoff);
    const uint32x4_t vxmask2 = vcgtq_u32(vnonsign2, vdenorm_cutoff);
    const uint32x4_t vxmask3 = vcgtq_u32(vnonsign3, vdenorm_cutoff);
    const uint32x4_t vxmask4 = vcgtq_u32(vnonsign4, vdenorm_cutoff);
    const uint32x4_t vxmask5 = vcgtq_u32(vnonsign5, vdenorm_cutoff);

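    // Select the appropriate result per lane and reattach the sign bit.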
    const uint32x4_t vf0 = vorrq_u32(vsign0, vreinterpretq_u32_f32(vbslq_f32(vxmask0, vnorm0, vdenorm0)));
    const uint32x4_t vf1 = vorrq_u32(vsign1, vreinterpretq_u32_f32(vbslq_f32(vxmask1, vnorm1, vdenorm1)));
    const uint32x4_t vf2 = vorrq_u32(vsign2, vreinterpretq_u32_f32(vbslq_f32(vxmask2, vnorm2, vdenorm2)));
    const uint32x4_t vf3 = vorrq_u32(vsign3, vreinterpretq_u32_f32(vbslq_f32(vxmask3, vnorm3, vdenorm3)));
    const uint32x4_t vf4 = vorrq_u32(vsign4, vreinterpretq_u32_f32(vbslq_f32(vxmask4, vnorm4, vdenorm4)));
    const uint32x4_t vf5 = vorrq_u32(vsign5, vreinterpretq_u32_f32(vbslq_f32(vxmask5, vnorm5, vdenorm5)));

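    // Store 24 FP32 results.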
    vst1q_f32(output, vreinterpretq_f32_u32(vf0)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf1)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf2)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf3)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf4)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf5)); output += 4;
  }
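  // Secondary loop: convert 8 elements at a time with the same algorithm as above.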
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));

    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    const uint32x4_t vf_lo = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
    const uint32x4_t vf_hi = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
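  // Tail: 1-7 elements remain. The full 8-element load may read past the end of the
  // input buffer; the XNN_OOB_READS annotation on this kernel declares that such
  // out-of-bounds reads are permitted.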
  if XNN_UNPREDICTABLE(n != 0) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));

    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    uint32x4_t vf = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

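    // Store 4, 2, and 1 remaining elements according to the bits of n
    // (n counts bytes of uint16_t input, hence the sizeof(uint16_t) scaling).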
    if (n & (4 * sizeof(uint16_t))) {
      vst1q_f32(output, vreinterpretq_f32_u32(vf)); output += 4;

      const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
      vf = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));
    }
    uint32x2_t vf_lo = vget_low_u32(vf);
    if (n & (2 * sizeof(uint16_t))) {
      vst1_f32(output, vreinterpret_f32_u32(vf_lo)); output += 2;
      vf_lo = vget_high_u32(vf);
    }
    if (n & (1 * sizeof(uint16_t))) {
      vst1_lane_f32(output, vreinterpret_f32_u32(vf_lo), 0);
    }
  }
}