// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

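// Converts a buffer of IEEE float32 values to IEEE float16 (binary16) storage
// using only integer and float32 NEON operations, so no ARMv8.2 FP16 hardware
// support is required. The conversion works by bit manipulation:
//  - |x| is rescaled by scale_to_inf, then scale_to_zero (from params), so
//    that values above the float16 range saturate to infinity and the FPU's
//    rounding discards the mantissa bits that float16 cannot hold;
//  - a per-element exponent bias is added in floating point to line the
//    float16 mantissa up inside the float32 bit pattern;
//  - the float16 sign, exponent, and mantissa fields are then assembled with
//    shifts and masks, with NaN inputs mapped to the canonical NaN 0x7E00.
// The scale and bias constants are read from params, which the caller must
// set up with the matching XNNPACK init routine before invoking this kernel.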
void xnn_f32_f16_vcvt_ukernel__neon_x24(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

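  // Broadcast the conversion parameters and float16 bit-field masks once,
  // outside of the processing loops.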
  const uint32x4_t vexp_bias = vld1q_dup_u32(&params->neon.exp_bias);
  const float32x4_t vscale_to_inf = vld1q_dup_f32(&params->neon.scale_to_inf);
  const uint32x4_t vexpw_max = vld1q_dup_u32(&params->neon.expw_max);
  const float32x4_t vscale_to_zero = vld1q_dup_f32(&params->neon.scale_to_zero);
  const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
  const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
  const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
  const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
  const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));

  uint16_t* o = (uint16_t*) output;
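  // Main loop: convert 24 elements (six float32x4 vectors) per iteration.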
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const float32x4_t vx0 = vld1q_f32(input); input += 4;
    const float32x4_t vx1 = vld1q_f32(input); input += 4;
    const float32x4_t vx2 = vld1q_f32(input); input += 4;
    const float32x4_t vx3 = vld1q_f32(input); input += 4;
    const float32x4_t vx4 = vld1q_f32(input); input += 4;
    const float32x4_t vx5 = vld1q_f32(input); input += 4;

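    // Drop the sign bit: the conversion below works on |x|, and the sign is
    // re-attached at the very end.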
    const float32x4_t vabsx0 = vabsq_f32(vx0);
    const float32x4_t vabsx1 = vabsq_f32(vx1);
    const float32x4_t vabsx2 = vabsq_f32(vx2);
    const float32x4_t vabsx3 = vabsq_f32(vx3);
    const float32x4_t vabsx4 = vabsq_f32(vx4);
    const float32x4_t vabsx5 = vabsq_f32(vx5);

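    // Derive the per-element rounding bias from the exponent of |x|.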
    uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
    uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
    uint32x4_t vbias2 = vaddq_u32(vreinterpretq_u32_f32(vabsx2), vexp_bias);
    uint32x4_t vbias3 = vaddq_u32(vreinterpretq_u32_f32(vabsx3), vexp_bias);
    uint32x4_t vbias4 = vaddq_u32(vreinterpretq_u32_f32(vabsx4), vexp_bias);
    uint32x4_t vbias5 = vaddq_u32(vreinterpretq_u32_f32(vabsx5), vexp_bias);

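    // Scale towards infinity so that out-of-range values saturate to inf, and
    // flag NaN inputs (their |x| bits exceed the all-ones exponent pattern in
    // expw_max).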
    float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
    float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
    float32x4_t vf2 = vmulq_f32(vabsx2, vscale_to_inf);
    float32x4_t vf3 = vmulq_f32(vabsx3, vscale_to_inf);
    float32x4_t vf4 = vmulq_f32(vabsx4, vscale_to_inf);
    float32x4_t vf5 = vmulq_f32(vabsx5, vscale_to_inf);
    const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
    const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
    const uint32x4_t vnanmaskw2 = vcgtq_u32(vreinterpretq_u32_f32(vabsx2), vexpw_max);
    const uint32x4_t vnanmaskw3 = vcgtq_u32(vreinterpretq_u32_f32(vabsx3), vexpw_max);
    const uint32x4_t vnanmaskw4 = vcgtq_u32(vreinterpretq_u32_f32(vabsx4), vexpw_max);
    const uint32x4_t vnanmaskw5 = vcgtq_u32(vreinterpretq_u32_f32(vabsx5), vexpw_max);

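    // Keep only the exponent field of the bias, and apply the second scale
    // factor, which brings the product back into a range where float32
    // addition can perform the float16 rounding.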
    vbias0 = vandq_u32(vbias0, vexpw_max);
    vbias1 = vandq_u32(vbias1, vexpw_max);
    vbias2 = vandq_u32(vbias2, vexpw_max);
    vbias3 = vandq_u32(vbias3, vexpw_max);
    vbias4 = vandq_u32(vbias4, vexpw_max);
    vbias5 = vandq_u32(vbias5, vexpw_max);
    vf0 = vmulq_f32(vf0, vscale_to_zero);
    vf1 = vmulq_f32(vf1, vscale_to_zero);
    vf2 = vmulq_f32(vf2, vscale_to_zero);
    vf3 = vmulq_f32(vf3, vscale_to_zero);
    vf4 = vmulq_f32(vf4, vscale_to_zero);
    vf5 = vmulq_f32(vf5, vscale_to_zero);

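    // Narrow the NaN masks to 16 bits per lane, and clamp the bias from below
    // so that zero and subnormal inputs still receive a usable bias.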
    const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
    const uint16x8_t vnanmaskh1 = vcombine_u16(vmovn_u32(vnanmaskw2), vmovn_u32(vnanmaskw3));
    const uint16x8_t vnanmaskh2 = vcombine_u16(vmovn_u32(vnanmaskw4), vmovn_u32(vnanmaskw5));
    vbias0 = vmaxq_u32(vbias0, vbias_min);
    vbias1 = vmaxq_u32(vbias1, vbias_min);
    vbias2 = vmaxq_u32(vbias2, vbias_min);
    vbias3 = vmaxq_u32(vbias3, vbias_min);
    vbias4 = vmaxq_u32(vbias4, vbias_min);
    vbias5 = vmaxq_u32(vbias5, vbias_min);

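    // Add the bias in floating point: round-to-nearest-even in the FPU rounds
    // the mantissa to float16 precision as a side effect.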
    vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
    vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
    vf2 = vaddq_f32(vf2, vreinterpretq_f32_u32(vbias2));
    vf3 = vaddq_f32(vf3, vreinterpretq_f32_u32(vbias3));
    vf4 = vaddq_f32(vf4, vreinterpretq_f32_u32(vbias4));
    vf5 = vaddq_f32(vf5, vreinterpretq_f32_u32(vbias5));

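    // Extract the raw float16 fields: exponent bits via a 13-bit narrowing
    // shift, mantissa (plus rounding carry) from the low bits of vf, and the
    // sign from the top bits of the original x.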
    uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
    uint16x8_t vexph1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf2), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf3), 13));
    uint16x8_t vexph2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf4), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf5), 13));
    uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
    uint16x8_t vmanth1 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf2)), vmovn_u32(vreinterpretq_u32_f32(vf3)));
    uint16x8_t vmanth2 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf4)), vmovn_u32(vreinterpretq_u32_f32(vf5)));
    uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
    uint16x8_t vsignh1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx2), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx3), 16));
    uint16x8_t vsignh2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx4), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx5), 16));

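    // Mask each field down to its float16 position.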
    vexph0 = vandq_u16(vexph0, vexph_mask);
    vexph1 = vandq_u16(vexph1, vexph_mask);
    vexph2 = vandq_u16(vexph2, vexph_mask);
    vmanth0 = vandq_u16(vmanth0, vmanth_mask);
    vmanth1 = vandq_u16(vmanth1, vmanth_mask);
    vmanth2 = vandq_u16(vmanth2, vmanth_mask);
    vsignh0 = vandq_u16(vsignh0, vsignh_mask);
    vsignh1 = vandq_u16(vsignh1, vsignh_mask);
    vsignh2 = vandq_u16(vsignh2, vsignh_mask);

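    // Merge mantissa and exponent; a carry out of the mantissa bits correctly
    // propagates into the exponent.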
    uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
    uint16x8_t vh1 = vaddq_u16(vmanth1, vexph1);
    uint16x8_t vh2 = vaddq_u16(vmanth2, vexph2);

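    // Replace NaN lanes with the canonical float16 NaN (0x7E00).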
    vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
    vh1 = vbslq_u16(vnanmaskh1, vnanh, vh1);
    vh2 = vbslq_u16(vnanmaskh2, vnanh, vh2);

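    // Re-attach the original sign.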
    vh0 = vorrq_u16(vh0, vsignh0);
    vh1 = vorrq_u16(vh1, vsignh1);
    vh2 = vorrq_u16(vh2, vsignh2);

    vst1q_u16(o, vh0); o += 8;
    vst1q_u16(o, vh1); o += 8;
    vst1q_u16(o, vh2); o += 8;
  }
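  // Remainder loop: convert 4 elements at a time with the same bit algorithm.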
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

    vst1_u16(o, vh); o += 4;
  }
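  // Tail: 1-3 elements remain. A full vector is loaded (the kernel is marked
  // XNN_OOB_READS, so reading past the last element is permitted); only the
  // valid lanes are stored below.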
  if XNN_UNLIKELY(n != 0) {
    assert(n % sizeof(float) == 0);
    assert(n >= 1 * sizeof(float));
    assert(n <= 3 * sizeof(float));
    const float32x4_t vx = vld1q_f32(input);

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

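    // Store only the lanes that correspond to valid inputs: two halves, then
    // one.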
    if (n & (2 * sizeof(float))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
      vh = vext_u16(vh, vh, 2);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_u16(o, vh, 0);
    }
  }
}