// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

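// Converts IEEE FP32 values to IEEE FP16 values using only integer and
// single-precision NEON instructions (no hardware FP16 conversion). The
// kernel uses the standard bit-manipulation scheme: work on the absolute
// value, re-bias the exponent, use a pair of multiplications so that
// magnitudes outside the FP16 range saturate to infinity, add a
// power-of-two bias to round the mantissa into position, then assemble the
// exponent, mantissa, and sign fields, patching NaN lanes with the
// canonical FP16 NaN (0x7E00).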
void xnn_f32_f16_vcvt_ukernel__neon_x32(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

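  // Conversion constants precomputed in the kernel parameters, plus masks
  // for the FP16 exponent field (0x7C00), low mantissa bits (0x0FFF), sign
  // bit (0x8000), and the canonical FP16 NaN (0x7E00).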
  const uint32x4_t vexp_bias = vld1q_dup_u32(&params->neon.exp_bias);
  const float32x4_t vscale_to_inf = vld1q_dup_f32(&params->neon.scale_to_inf);
  const uint32x4_t vexpw_max = vld1q_dup_u32(&params->neon.expw_max);
  const float32x4_t vscale_to_zero = vld1q_dup_f32(&params->neon.scale_to_zero);
  const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
  const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));
  const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
  const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));
  const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));

  uint16_t* o = (uint16_t*) output;
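  // Main loop: convert 32 elements (8 vectors of 4 floats) per iteration.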
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    const float32x4_t vx0 = vld1q_f32(input); input += 4;
    const float32x4_t vx1 = vld1q_f32(input); input += 4;
    const float32x4_t vx2 = vld1q_f32(input); input += 4;
    const float32x4_t vx3 = vld1q_f32(input); input += 4;
    const float32x4_t vx4 = vld1q_f32(input); input += 4;
    const float32x4_t vx5 = vld1q_f32(input); input += 4;
    const float32x4_t vx6 = vld1q_f32(input); input += 4;
    const float32x4_t vx7 = vld1q_f32(input); input += 4;

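    // Work on absolute values; the signs are reattached at the end.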
    const float32x4_t vabsx0 = vabsq_f32(vx0);
    const float32x4_t vabsx1 = vabsq_f32(vx1);
    const float32x4_t vabsx2 = vabsq_f32(vx2);
    const float32x4_t vabsx3 = vabsq_f32(vx3);
    const float32x4_t vabsx4 = vabsq_f32(vx4);
    const float32x4_t vabsx5 = vabsq_f32(vx5);
    const float32x4_t vabsx6 = vabsq_f32(vx6);
    const float32x4_t vabsx7 = vabsq_f32(vx7);

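    // Re-bias the input exponents toward the FP16 exponent range.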
    uint32x4_t vbias0 = vaddq_u32(vreinterpretq_u32_f32(vabsx0), vexp_bias);
    uint32x4_t vbias1 = vaddq_u32(vreinterpretq_u32_f32(vabsx1), vexp_bias);
    uint32x4_t vbias2 = vaddq_u32(vreinterpretq_u32_f32(vabsx2), vexp_bias);
    uint32x4_t vbias3 = vaddq_u32(vreinterpretq_u32_f32(vabsx3), vexp_bias);
    uint32x4_t vbias4 = vaddq_u32(vreinterpretq_u32_f32(vabsx4), vexp_bias);
    uint32x4_t vbias5 = vaddq_u32(vreinterpretq_u32_f32(vabsx5), vexp_bias);
    uint32x4_t vbias6 = vaddq_u32(vreinterpretq_u32_f32(vabsx6), vexp_bias);
    uint32x4_t vbias7 = vaddq_u32(vreinterpretq_u32_f32(vabsx7), vexp_bias);

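    // Scale toward infinity so magnitudes that overflow FP16 saturate to
    // float infinity, and flag NaN lanes (absolute bit patterns above the
    // maximum exponent field, expw_max).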
    float32x4_t vf0 = vmulq_f32(vabsx0, vscale_to_inf);
    float32x4_t vf1 = vmulq_f32(vabsx1, vscale_to_inf);
    float32x4_t vf2 = vmulq_f32(vabsx2, vscale_to_inf);
    float32x4_t vf3 = vmulq_f32(vabsx3, vscale_to_inf);
    float32x4_t vf4 = vmulq_f32(vabsx4, vscale_to_inf);
    float32x4_t vf5 = vmulq_f32(vabsx5, vscale_to_inf);
    float32x4_t vf6 = vmulq_f32(vabsx6, vscale_to_inf);
    float32x4_t vf7 = vmulq_f32(vabsx7, vscale_to_inf);
    const uint32x4_t vnanmaskw0 = vcgtq_u32(vreinterpretq_u32_f32(vabsx0), vexpw_max);
    const uint32x4_t vnanmaskw1 = vcgtq_u32(vreinterpretq_u32_f32(vabsx1), vexpw_max);
    const uint32x4_t vnanmaskw2 = vcgtq_u32(vreinterpretq_u32_f32(vabsx2), vexpw_max);
    const uint32x4_t vnanmaskw3 = vcgtq_u32(vreinterpretq_u32_f32(vabsx3), vexpw_max);
    const uint32x4_t vnanmaskw4 = vcgtq_u32(vreinterpretq_u32_f32(vabsx4), vexpw_max);
    const uint32x4_t vnanmaskw5 = vcgtq_u32(vreinterpretq_u32_f32(vabsx5), vexpw_max);
    const uint32x4_t vnanmaskw6 = vcgtq_u32(vreinterpretq_u32_f32(vabsx6), vexpw_max);
    const uint32x4_t vnanmaskw7 = vcgtq_u32(vreinterpretq_u32_f32(vabsx7), vexpw_max);

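    // Keep only the exponent field of the bias, and scale back toward zero;
    // in-range values remain finite while overflowed lanes stay infinite.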
    vbias0 = vandq_u32(vbias0, vexpw_max);
    vbias1 = vandq_u32(vbias1, vexpw_max);
    vbias2 = vandq_u32(vbias2, vexpw_max);
    vbias3 = vandq_u32(vbias3, vexpw_max);
    vbias4 = vandq_u32(vbias4, vexpw_max);
    vbias5 = vandq_u32(vbias5, vexpw_max);
    vbias6 = vandq_u32(vbias6, vexpw_max);
    vbias7 = vandq_u32(vbias7, vexpw_max);
    vf0 = vmulq_f32(vf0, vscale_to_zero);
    vf1 = vmulq_f32(vf1, vscale_to_zero);
    vf2 = vmulq_f32(vf2, vscale_to_zero);
    vf3 = vmulq_f32(vf3, vscale_to_zero);
    vf4 = vmulq_f32(vf4, vscale_to_zero);
    vf5 = vmulq_f32(vf5, vscale_to_zero);
    vf6 = vmulq_f32(vf6, vscale_to_zero);
    vf7 = vmulq_f32(vf7, vscale_to_zero);

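    // Narrow the NaN masks to 16 bits per lane, and clamp the bias from
    // below (0x40000000) so lanes that produce FP16 subnormals still round
    // at a fixed position.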
    const uint16x8_t vnanmaskh0 = vcombine_u16(vmovn_u32(vnanmaskw0), vmovn_u32(vnanmaskw1));
    const uint16x8_t vnanmaskh1 = vcombine_u16(vmovn_u32(vnanmaskw2), vmovn_u32(vnanmaskw3));
    const uint16x8_t vnanmaskh2 = vcombine_u16(vmovn_u32(vnanmaskw4), vmovn_u32(vnanmaskw5));
    const uint16x8_t vnanmaskh3 = vcombine_u16(vmovn_u32(vnanmaskw6), vmovn_u32(vnanmaskw7));
    vbias0 = vmaxq_u32(vbias0, vbias_min);
    vbias1 = vmaxq_u32(vbias1, vbias_min);
    vbias2 = vmaxq_u32(vbias2, vbias_min);
    vbias3 = vmaxq_u32(vbias3, vbias_min);
    vbias4 = vmaxq_u32(vbias4, vbias_min);
    vbias5 = vmaxq_u32(vbias5, vbias_min);
    vbias6 = vmaxq_u32(vbias6, vbias_min);
    vbias7 = vmaxq_u32(vbias7, vbias_min);

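    // Adding the power-of-two bias rounds the mantissa and shifts the FP16
    // result fields into the low bits of the float sum.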
    vf0 = vaddq_f32(vf0, vreinterpretq_f32_u32(vbias0));
    vf1 = vaddq_f32(vf1, vreinterpretq_f32_u32(vbias1));
    vf2 = vaddq_f32(vf2, vreinterpretq_f32_u32(vbias2));
    vf3 = vaddq_f32(vf3, vreinterpretq_f32_u32(vbias3));
    vf4 = vaddq_f32(vf4, vreinterpretq_f32_u32(vbias4));
    vf5 = vaddq_f32(vf5, vreinterpretq_f32_u32(vbias5));
    vf6 = vaddq_f32(vf6, vreinterpretq_f32_u32(vbias6));
    vf7 = vaddq_f32(vf7, vreinterpretq_f32_u32(vbias7));

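    // Extract the result fields: exponent bits (shifted right by 13), low
    // mantissa/rounding bits, and the signs taken from the top halves of the
    // original inputs.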
    uint16x8_t vexph0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf0), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf1), 13));
    uint16x8_t vexph1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf2), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf3), 13));
    uint16x8_t vexph2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf4), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf5), 13));
    uint16x8_t vexph3 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf6), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf7), 13));
    uint16x8_t vmanth0 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf0)), vmovn_u32(vreinterpretq_u32_f32(vf1)));
    uint16x8_t vmanth1 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf2)), vmovn_u32(vreinterpretq_u32_f32(vf3)));
    uint16x8_t vmanth2 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf4)), vmovn_u32(vreinterpretq_u32_f32(vf5)));
    uint16x8_t vmanth3 = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf6)), vmovn_u32(vreinterpretq_u32_f32(vf7)));
    uint16x8_t vsignh0 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx0), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx1), 16));
    uint16x8_t vsignh1 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx2), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx3), 16));
    uint16x8_t vsignh2 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx4), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx5), 16));
    uint16x8_t vsignh3 = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx6), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx7), 16));

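    // Mask each field into its position in the FP16 layout.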
    vexph0 = vandq_u16(vexph0, vexph_mask);
    vexph1 = vandq_u16(vexph1, vexph_mask);
    vexph2 = vandq_u16(vexph2, vexph_mask);
    vexph3 = vandq_u16(vexph3, vexph_mask);
    vmanth0 = vandq_u16(vmanth0, vmanth_mask);
    vmanth1 = vandq_u16(vmanth1, vmanth_mask);
    vmanth2 = vandq_u16(vmanth2, vmanth_mask);
    vmanth3 = vandq_u16(vmanth3, vmanth_mask);
    vsignh0 = vandq_u16(vsignh0, vsignh_mask);
    vsignh1 = vandq_u16(vsignh1, vsignh_mask);
    vsignh2 = vandq_u16(vsignh2, vsignh_mask);
    vsignh3 = vandq_u16(vsignh3, vsignh_mask);

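    // Combine mantissa and exponent; a mantissa carry correctly increments
    // the exponent field.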
    uint16x8_t vh0 = vaddq_u16(vmanth0, vexph0);
    uint16x8_t vh1 = vaddq_u16(vmanth1, vexph1);
    uint16x8_t vh2 = vaddq_u16(vmanth2, vexph2);
    uint16x8_t vh3 = vaddq_u16(vmanth3, vexph3);

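    // Replace NaN lanes with the canonical FP16 NaN.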
    vh0 = vbslq_u16(vnanmaskh0, vnanh, vh0);
    vh1 = vbslq_u16(vnanmaskh1, vnanh, vh1);
    vh2 = vbslq_u16(vnanmaskh2, vnanh, vh2);
    vh3 = vbslq_u16(vnanmaskh3, vnanh, vh3);

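    // Reattach the signs.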
    vh0 = vorrq_u16(vh0, vsignh0);
    vh1 = vorrq_u16(vh1, vsignh1);
    vh2 = vorrq_u16(vh2, vsignh2);
    vh3 = vorrq_u16(vh3, vsignh3);

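    // Store 32 FP16 results.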
    vst1q_u16(o, vh0); o += 8;
    vst1q_u16(o, vh1); o += 8;
    vst1q_u16(o, vh2); o += 8;
    vst1q_u16(o, vh3); o += 8;
  }
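  // Remainder loop: same algorithm, 4 elements at a time.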
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

    vst1_u16(o, vh); o += 4;
  }
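  // Tail of 1 to 3 elements: a full 4-float vector is loaded (the kernel is
  // declared XNN_OOB_READS, so reading past the end of the input is allowed)
  // and only the valid lanes are stored below.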
  if XNN_UNLIKELY(n != 0) {
    assert(n % sizeof(float) == 0);
    assert(n >= 1 * sizeof(float));
    assert(n <= 3 * sizeof(float));
    const float32x4_t vx = vld1q_f32(input);

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

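    // Store the valid lanes: two halfwords as one 32-bit lane (rotating the
    // vector afterwards), then a single halfword.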
    if (n & (2 * sizeof(float))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
      vh = vext_u16(vh, vh, 2);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_u16(o, vh, 0);
    }
  }
}