// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/neon-int32.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__neon_int32_x16(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

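  // Constants for the integer bit-twiddling conversion:
  //   vsign_mask isolates the FP16 sign bit (pre-shifted into bit 31),
  //   vexp_offset and vexp_scale re-bias the exponent of normalized values
  //   (params->neon.exp_scale is expected to hold 0x1.0p-112f),
  //   vmagic_bias is the bit pattern of 0.5f, used to rebuild denormals,
  //   vdenorm_cutoff separates normalized inputs from denormals and zeros.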
  const uint32x4_t vsign_mask = vmovq_n_u32(0x80000000);
  const uint32x4_t vexp_offset = vmovq_n_u32(0x70000000);
  const float32x4_t vexp_scale = vld1q_dup_f32(&params->neon.exp_scale);
  const uint32x4_t vmagic_bias = vmovq_n_u32(0x3F000000);
  const uint32x4_t vdenorm_cutoff = vmovq_n_u32(0x04000000);

  const uint16_t* i = (const uint16_t*) input;
  for (; n >= 16 * sizeof(uint16_t); n -= 16 * sizeof(uint16_t)) {
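    // Load 16 FP16 values and widen each one into the upper half of a
    // 32-bit lane, so the FP16 sign bit lands in bit 31.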
    const uint16x8_t vh0 = vld1q_u16(i); i += 8;
    const uint16x8_t vh1 = vld1q_u16(i); i += 8;

    const uint32x4_t vw0 = vshll_n_u16(vget_low_u16(vh0), 16);
    const uint32x4_t vw1 = vshll_n_u16(vget_high_u16(vh0), 16);
    const uint32x4_t vw2 = vshll_n_u16(vget_low_u16(vh1), 16);
    const uint32x4_t vw3 = vshll_n_u16(vget_high_u16(vh1), 16);

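    // Split each word into its sign (bit 31) and the remaining
    // exponent+mantissa bits; XOR-ing with the sign clears it.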
    const uint32x4_t vsign0 = vandq_u32(vw0, vsign_mask);
    const uint32x4_t vsign1 = vandq_u32(vw1, vsign_mask);
    const uint32x4_t vsign2 = vandq_u32(vw2, vsign_mask);
    const uint32x4_t vsign3 = vandq_u32(vw3, vsign_mask);

    const uint32x4_t vnonsign0 = veorq_u32(vw0, vsign0);
    const uint32x4_t vnonsign1 = veorq_u32(vw1, vsign1);
    const uint32x4_t vnonsign2 = veorq_u32(vw2, vsign2);
    const uint32x4_t vnonsign3 = veorq_u32(vw3, vsign3);

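    // Normalized inputs: shift exponent+mantissa into the FP32 field
    // positions (>> 3), accumulate onto vexp_offset, and multiply by
    // vexp_scale to fix up the exponent bias (15 -> 127).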
    const float32x4_t vnorm0 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign0, 3)), vexp_scale);
    const float32x4_t vnorm1 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign1, 3)), vexp_scale);
    const float32x4_t vnorm2 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign2, 3)), vexp_scale);
    const float32x4_t vnorm3 = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign3, 3)), vexp_scale);

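    // Denormalized inputs: insert the mantissa bits into the bit pattern
    // of 0.5f, then subtract 0.5f to recover the denormal value exactly.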
    const float32x4_t vdenorm0 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign0, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm1 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign1, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm2 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign2, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm3 = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign3, 16)), vreinterpretq_f32_u32(vmagic_bias));

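    // Lanes above the cutoff are normalized (or Inf/NaN); the rest are
    // denormals or zeros.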
    const uint32x4_t vxmask0 = vcgtq_u32(vnonsign0, vdenorm_cutoff);
    const uint32x4_t vxmask1 = vcgtq_u32(vnonsign1, vdenorm_cutoff);
    const uint32x4_t vxmask2 = vcgtq_u32(vnonsign2, vdenorm_cutoff);
    const uint32x4_t vxmask3 = vcgtq_u32(vnonsign3, vdenorm_cutoff);

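    // Select the correct conversion per lane and re-attach the sign bit.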
    const uint32x4_t vf0 = vorrq_u32(vsign0, vreinterpretq_u32_f32(vbslq_f32(vxmask0, vnorm0, vdenorm0)));
    const uint32x4_t vf1 = vorrq_u32(vsign1, vreinterpretq_u32_f32(vbslq_f32(vxmask1, vnorm1, vdenorm1)));
    const uint32x4_t vf2 = vorrq_u32(vsign2, vreinterpretq_u32_f32(vbslq_f32(vxmask2, vnorm2, vdenorm2)));
    const uint32x4_t vf3 = vorrq_u32(vsign3, vreinterpretq_u32_f32(vbslq_f32(vxmask3, vnorm3, vdenorm3)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf0)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf1)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf2)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf3)); output += 4;
  }
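  // Same conversion, one 8-element vector at a time.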
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));

    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    const uint32x4_t vf_lo = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
    const uint32x4_t vf_hi = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
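  // Tail of 1-7 elements: a full vector is still loaded (hence the
  // XNN_OOB_READS annotation on the kernel), and only the valid lanes
  // are converted and stored.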
  if XNN_UNPREDICTABLE(n != 0) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint32x4_t vw_lo = vshll_n_u16(vget_low_u16(vh), 16);
    const uint32x4_t vw_hi = vshll_n_u16(vget_high_u16(vh), 16);

    const uint32x4_t vsign_lo = vandq_u32(vw_lo, vsign_mask);
    const uint32x4_t vsign_hi = vandq_u32(vw_hi, vsign_mask);

    const uint32x4_t vnonsign_lo = veorq_u32(vw_lo, vsign_lo);
    const uint32x4_t vnonsign_hi = veorq_u32(vw_hi, vsign_hi);

    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_lo, 3)), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u32(vsraq_n_u32(vexp_offset, vnonsign_hi, 3)), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_lo, 16)), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vsriq_n_u32(vmagic_bias, vnonsign_hi, 16)), vreinterpretq_f32_u32(vmagic_bias));

    const uint32x4_t vxmask_lo = vcgtq_u32(vnonsign_lo, vdenorm_cutoff);
    uint32x4_t vf = vorrq_u32(vsign_lo, vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    if (n & (4 * sizeof(uint16_t))) {
      vst1q_f32(output, vreinterpretq_f32_u32(vf)); output += 4;

      const uint32x4_t vxmask_hi = vcgtq_u32(vnonsign_hi, vdenorm_cutoff);
      vf = vorrq_u32(vsign_hi, vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));
    }
    uint32x2_t vf_lo = vget_low_u32(vf);
    if (n & (2 * sizeof(uint16_t))) {
      vst1_f32(output, vreinterpret_f32_u32(vf_lo)); output += 2;
      vf_lo = vget_high_u32(vf);
    }
    if (n & (1 * sizeof(uint16_t))) {
      vst1_lane_f32(output, vreinterpret_f32_u32(vf_lo), 0);
    }
  }
}