// Auto-generated file. Do not edit!
// Template: src/f16-f32-vcvt/neon-int16.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


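// Converts a buffer of IEEE FP16 values to FP32. The bulk of the work is done
// with NEON integer instructions; the only floating-point operations are one
// multiply (normalized path) and one subtract (denormalized path) per vector.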
void xnn_f16_f32_vcvt_ukernel__neon_int16_x24(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

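  // Conversion constants: vsign_mask isolates the FP16 sign bit, vexp_offset
  // and vexp_scale rebias the exponent of normalized inputs, vmagic_bias
  // (0x3F000000 == 0.5f) recovers denormalized inputs via the magic-bias
  // trick, and vdenorm_cutoff separates the two paths.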
  const uint16x8_t vsign_mask = vmovq_n_u16(0x8000);
  const uint16x8_t vexp_offset = vmovq_n_u16(0x7000);
  const float32x4_t vexp_scale = vld1q_dup_f32(&params->neon.exp_scale);
  const uint32x4_t vmagic_bias = vmovq_n_u32(0x3F000000);
  const uint16x8_t vdenorm_cutoff = vmovq_n_u16(0x0400);

  const uint16_t* i = (const uint16_t*) input;
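  // Main loop: convert three vectors of 8 FP16 values (24 elements) per
  // iteration.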
  for (; n >= 24 * sizeof(uint16_t); n -= 24 * sizeof(uint16_t)) {
    const uint16x8_t vh0 = vld1q_u16(i); i += 8;
    const uint16x8_t vh1 = vld1q_u16(i); i += 8;
    const uint16x8_t vh2 = vld1q_u16(i); i += 8;

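    // Split each FP16 word into its sign bit and its magnitude (sign cleared).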
    const uint16x8_t vsign0 = vandq_u16(vh0, vsign_mask);
    const uint16x8_t vsign1 = vandq_u16(vh1, vsign_mask);
    const uint16x8_t vsign2 = vandq_u16(vh2, vsign_mask);

    const uint16x8_t vnonsign0 = veorq_u16(vh0, vsign0);
    const uint16x8_t vnonsign1 = veorq_u16(vh1, vsign1);
    const uint16x8_t vnonsign2 = veorq_u16(vh2, vsign2);

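    // Normalized path, step 1: form each FP32 word from two 16-bit halves.
    // The low half is the FP16 bits shifted left by 13 (mantissa into FP32
    // position); the high half is the FP16 bits shifted right by 3 and added
    // to the exponent offset. Zipping interleaves the halves into 32-bit words.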
    const uint16x8x2_t vprenorm0 = vzipq_u16(vshlq_n_u16(vnonsign0, 13), vsraq_n_u16(vexp_offset, vnonsign0, 3));
    const uint16x8x2_t vprenorm1 = vzipq_u16(vshlq_n_u16(vnonsign1, 13), vsraq_n_u16(vexp_offset, vnonsign1, 3));
    const uint16x8x2_t vprenorm2 = vzipq_u16(vshlq_n_u16(vnonsign2, 13), vsraq_n_u16(vexp_offset, vnonsign2, 3));

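    // Normalized path, step 2: a single float multiply by vexp_scale corrects
    // the remaining exponent bias.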
    const float32x4_t vnorm0 = vmulq_f32(vreinterpretq_f32_u16(vprenorm0.val[0]), vexp_scale);
    const float32x4_t vnorm1 = vmulq_f32(vreinterpretq_f32_u16(vprenorm0.val[1]), vexp_scale);
    const float32x4_t vnorm2 = vmulq_f32(vreinterpretq_f32_u16(vprenorm1.val[0]), vexp_scale);
    const float32x4_t vnorm3 = vmulq_f32(vreinterpretq_f32_u16(vprenorm1.val[1]), vexp_scale);
    const float32x4_t vnorm4 = vmulq_f32(vreinterpretq_f32_u16(vprenorm2.val[0]), vexp_scale);
    const float32x4_t vnorm5 = vmulq_f32(vreinterpretq_f32_u16(vprenorm2.val[1]), vexp_scale);

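    // Denormalized path: widen the FP16 bits into the mantissa of the magic
    // bias (0.5f) and subtract the bias as a float; the difference is exactly
    // the denormalized (or zero) value.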
    const float32x4_t vdenorm0 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign0))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm1 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign0))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm2 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign1))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm3 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign1))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm4 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign2))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm5 = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign2))), vreinterpretq_f32_u32(vmagic_bias));

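    // Per-element mask: all ones where the magnitude exceeds the smallest
    // normalized FP16 value, i.e. where the normalized path applies.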
    const uint16x8_t vmask0 = vcgtq_u16(vnonsign0, vdenorm_cutoff);
    const uint16x8_t vmask1 = vcgtq_u16(vnonsign1, vdenorm_cutoff);
    const uint16x8_t vmask2 = vcgtq_u16(vnonsign2, vdenorm_cutoff);

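    // Sign-extend each 16-bit mask to 32 bits, select the normalized or
    // denormalized result per lane, and OR the sign bit (widened into bit 31)
    // back in.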
    const uint32x4_t vxmask0 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask0))));
    const uint32x4_t vf0 = vorrq_u32(vshll_n_u16(vget_low_u16(vsign0), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask0, vnorm0, vdenorm0)));
    const uint32x4_t vxmask2 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask1))));
    const uint32x4_t vf2 = vorrq_u32(vshll_n_u16(vget_low_u16(vsign1), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask2, vnorm2, vdenorm2)));
    const uint32x4_t vxmask4 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask2))));
    const uint32x4_t vf4 = vorrq_u32(vshll_n_u16(vget_low_u16(vsign2), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask4, vnorm4, vdenorm4)));

    const uint32x4_t vxmask1 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask0))));
    const uint32x4_t vf1 = vorrq_u32(vshll_n_u16(vget_high_u16(vsign0), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask1, vnorm1, vdenorm1)));
    const uint32x4_t vxmask3 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask1))));
    const uint32x4_t vf3 = vorrq_u32(vshll_n_u16(vget_high_u16(vsign1), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask3, vnorm3, vdenorm3)));
    const uint32x4_t vxmask5 = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask2))));
    const uint32x4_t vf5 = vorrq_u32(vshll_n_u16(vget_high_u16(vsign2), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask5, vnorm5, vdenorm5)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf0)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf1)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf2)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf3)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf4)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf5)); output += 4;
  }
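  // Convert any remaining full vectors of 8 elements (same steps as the main
  // loop).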
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint16x8_t vsign = vandq_u16(vh, vsign_mask);

    const uint16x8_t vnonsign = veorq_u16(vh, vsign);

    const uint16x8x2_t vprenorm = vzipq_u16(vshlq_n_u16(vnonsign, 13), vsraq_n_u16(vexp_offset, vnonsign, 3));
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[0]), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[1]), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));

    const uint16x8_t vmask = vcgtq_u16(vnonsign, vdenorm_cutoff);

    const uint32x4_t vxmask_lo = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask))));
    const uint32x4_t vf_lo = vorrq_u32(vshll_n_u16(vget_low_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

    const uint32x4_t vxmask_hi = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask))));
    const uint32x4_t vf_hi = vorrq_u32(vshll_n_u16(vget_high_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));

    vst1q_f32(output, vreinterpretq_f32_u32(vf_lo)); output += 4;
    vst1q_f32(output, vreinterpretq_f32_u32(vf_hi)); output += 4;
  }
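  // Convert the final 1-7 elements. A full vector of 8 is loaded regardless;
  // the out-of-bounds read is declared safe by the XNN_OOB_READS annotation.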
  if XNN_UNPREDICTABLE(n != 0) {
    const uint16x8_t vh = vld1q_u16(i); i += 8;

    const uint16x8_t vsign = vandq_u16(vh, vsign_mask);

    const uint16x8_t vnonsign = veorq_u16(vh, vsign);

    const uint16x8x2_t vprenorm = vzipq_u16(vshlq_n_u16(vnonsign, 13), vsraq_n_u16(vexp_offset, vnonsign, 3));
    const float32x4_t vnorm_lo = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[0]), vexp_scale);
    const float32x4_t vnorm_hi = vmulq_f32(vreinterpretq_f32_u16(vprenorm.val[1]), vexp_scale);

    const float32x4_t vdenorm_lo = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_low_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));
    const float32x4_t vdenorm_hi = vsubq_f32(vreinterpretq_f32_u32(vaddw_u16(vmagic_bias, vget_high_u16(vnonsign))), vreinterpretq_f32_u32(vmagic_bias));

    const uint16x8_t vmask = vcgtq_u16(vnonsign, vdenorm_cutoff);

    const uint32x4_t vxmask_lo = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_low_u16(vmask))));
    uint32x4_t vf = vorrq_u32(vshll_n_u16(vget_low_u16(vsign), 16),
      vreinterpretq_u32_f32(vbslq_f32(vxmask_lo, vnorm_lo, vdenorm_lo)));

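    // Store 4, 2, and then 1 element according to the bits of the remaining
    // byte count, moving the unstored lanes down after each partial store.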
    if (n & (4 * sizeof(uint16_t))) {
      vst1q_f32(output, vreinterpretq_f32_u32(vf)); output += 4;

      const uint32x4_t vxmask_hi = vreinterpretq_u32_s32(vmovl_s16(vreinterpret_s16_u16(vget_high_u16(vmask))));
      vf = vorrq_u32(vshll_n_u16(vget_high_u16(vsign), 16),
        vreinterpretq_u32_f32(vbslq_f32(vxmask_hi, vnorm_hi, vdenorm_hi)));
    }
    uint32x2_t vf_lo = vget_low_u32(vf);
    if (n & (2 * sizeof(uint16_t))) {
      vst1_f32(output, vreinterpret_f32_u32(vf_lo)); output += 2;
      vf_lo = vget_high_u32(vf);
    }
    if (n & (1 * sizeof(uint16_t))) {
      vst1_lane_f32(output, vreinterpret_f32_u32(vf_lo), 0);
    }
  }
}