// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


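// Converts 32-bit floats to 16-bit half-precision values using only SSE2 integer and
// floating-point bit manipulation, processing 32 elements per main-loop iteration.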
void xnn_f32_f16_vcvt_ukernel__sse2_x32(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

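  // Load the conversion constants prepared in the params structure: the non-sign (magnitude) mask,
  // the exponent bias, the overflow/underflow scale factors, and the half-precision field masks.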
  const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
  const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
  const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
  const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
  const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
  const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
  const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
  const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
  const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);

  uint16_t* o = (uint16_t*) output;
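  // Main loop: convert 32 elements (8 SSE vectors) per iteration.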
  for (; n >= 32 * sizeof(float); n -= 32 * sizeof(float)) {
    const __m128 vx0 = _mm_loadu_ps(input);
    const __m128 vx1 = _mm_loadu_ps(input + 4);
    const __m128 vx2 = _mm_loadu_ps(input + 8);
    const __m128 vx3 = _mm_loadu_ps(input + 12);
    const __m128 vx4 = _mm_loadu_ps(input + 16);
    const __m128 vx5 = _mm_loadu_ps(input + 20);
    const __m128 vx6 = _mm_loadu_ps(input + 24);
    const __m128 vx7 = _mm_loadu_ps(input + 28);
    input += 32;

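    // Split each input into its absolute value and its isolated sign bit.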
    const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
    const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
    const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
    const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
    const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
    const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);
    const __m128 vabsx6 = _mm_and_ps(vx6, vnonsign_mask);
    const __m128 vabsx7 = _mm_and_ps(vx7, vnonsign_mask);

    const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
    const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
    const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
    const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
    const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
    const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);
    const __m128 vsignx6 = _mm_xor_ps(vx6, vabsx6);
    const __m128 vsignx7 = _mm_xor_ps(vx7, vabsx7);

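    // Derive the rounding bias from the input exponent, and scale |x| towards infinity so that
    // values too large to represent in half precision overflow.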
    __m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
    __m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
    __m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
    __m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
    __m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
    __m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);
    __m128i vbias6 = _mm_add_epi32(_mm_castps_si128(vabsx6), vexp_bias);
    __m128i vbias7 = _mm_add_epi32(_mm_castps_si128(vabsx7), vexp_bias);

    __m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
    __m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
    __m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
    __m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
    __m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
    __m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);
    __m128 vf6 = _mm_mul_ps(vabsx6, vscale_to_inf);
    __m128 vf7 = _mm_mul_ps(vabsx7, vscale_to_inf);

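    // Detect NaN inputs: with the sign cleared, only a NaN's bit pattern compares greater
    // (as a signed integer) than the exponent-only pattern vexpw_max.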
    const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
    const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
    const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
    const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
    const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
    const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);
    const __m128i vnanmaskw6 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx6), vexpw_max);
    const __m128i vnanmaskw7 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx7), vexpw_max);

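    // Keep only the exponent field of the bias, and complete the overflow/underflow rescale of |x|.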
    vbias0 = _mm_and_si128(vbias0, vexpw_max);
    vbias1 = _mm_and_si128(vbias1, vexpw_max);
    vbias2 = _mm_and_si128(vbias2, vexpw_max);
    vbias3 = _mm_and_si128(vbias3, vexpw_max);
    vbias4 = _mm_and_si128(vbias4, vexpw_max);
    vbias5 = _mm_and_si128(vbias5, vexpw_max);
    vbias6 = _mm_and_si128(vbias6, vexpw_max);
    vbias7 = _mm_and_si128(vbias7, vexpw_max);

    vf0 = _mm_mul_ps(vf0, vscale_to_zero);
    vf1 = _mm_mul_ps(vf1, vscale_to_zero);
    vf2 = _mm_mul_ps(vf2, vscale_to_zero);
    vf3 = _mm_mul_ps(vf3, vscale_to_zero);
    vf4 = _mm_mul_ps(vf4, vscale_to_zero);
    vf5 = _mm_mul_ps(vf5, vscale_to_zero);
    vf6 = _mm_mul_ps(vf6, vscale_to_zero);
    vf7 = _mm_mul_ps(vf7, vscale_to_zero);

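    // Narrow the NaN masks and the sign bits from 32-bit to 16-bit lanes.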
    const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
    const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
    const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);
    const __m128i vnanmaskh3 = _mm_packs_epi32(vnanmaskw6, vnanmaskw7);

    const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
    const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
    const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));
    const __m128i vsignh3 = _mm_packs_epi32(_mm_castps_si128(vsignx6), _mm_castps_si128(vsignx7));

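    // Clamp the bias from below (treated as 16-bit lanes) so that inputs in the zero/subnormal range of
    // half precision round correctly, and seed the result with the canonical half-precision NaN pattern
    // in the lanes where the input is NaN.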
    vbias0 = _mm_max_epi16(vbias0, vbias_min);
    vbias1 = _mm_max_epi16(vbias1, vbias_min);
    vbias2 = _mm_max_epi16(vbias2, vbias_min);
    vbias3 = _mm_max_epi16(vbias3, vbias_min);
    vbias4 = _mm_max_epi16(vbias4, vbias_min);
    vbias5 = _mm_max_epi16(vbias5, vbias_min);
    vbias6 = _mm_max_epi16(vbias6, vbias_min);
    vbias7 = _mm_max_epi16(vbias7, vbias_min);

    __m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
    __m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);
    __m128i vh2 = _mm_and_si128(vnanh, vnanmaskh2);
    __m128i vh3 = _mm_and_si128(vnanh, vnanmaskh3);

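    // Adding the bias rounds away the mantissa bits that do not fit in half precision;
    // then merge the sign bits into the result.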
    vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
    vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
    vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
    vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
    vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
    vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));
    vf6 = _mm_add_ps(vf6, _mm_castsi128_ps(vbias6));
    vf7 = _mm_add_ps(vf7, _mm_castsi128_ps(vbias7));

    vh0 = _mm_or_si128(vh0, vsignh0);
    vh1 = _mm_or_si128(vh1, vsignh1);
    vh2 = _mm_or_si128(vh2, vsignh2);
    vh3 = _mm_or_si128(vh3, vsignh3);

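    // Extract the half-precision exponent and mantissa fields from the rebiased float and
    // recombine them into the non-sign part of the result, narrowed to 16-bit lanes.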
    __m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
    __m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
    __m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
    __m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
    __m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
    __m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);
    __m128i vexpw6 = _mm_srli_epi32(_mm_castps_si128(vf6), 13);
    __m128i vexpw7 = _mm_srli_epi32(_mm_castps_si128(vf7), 13);

    const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
    const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
    const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
    const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
    const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
    const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);
    const __m128i vmantw6 = _mm_and_si128(_mm_castps_si128(vf6), vmanth_mask);
    const __m128i vmantw7 = _mm_and_si128(_mm_castps_si128(vf7), vmanth_mask);

    vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
    vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
    vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
    vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
    vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
    vexpw5 = _mm_and_si128(vexpw5, vexph_mask);
    vexpw6 = _mm_and_si128(vexpw6, vexph_mask);
    vexpw7 = _mm_and_si128(vexpw7, vexph_mask);

    const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
    const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
    const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
    const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
    const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
    const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);
    const __m128i vnonsignw6 = _mm_add_epi32(vmantw6, vexpw6);
    const __m128i vnonsignw7 = _mm_add_epi32(vmantw7, vexpw7);

    const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
    const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
    const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);
    const __m128i vnonsignh3 = _mm_packs_epi32(vnonsignw6, vnonsignw7);

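    // Keep the converted value in the lanes where the input was not NaN, and store
    // 32 half-precision values.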
    vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
    vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));
    vh2 = _mm_or_si128(vh2, _mm_andnot_si128(vnanmaskh2, vnonsignh2));
    vh3 = _mm_or_si128(vh3, _mm_andnot_si128(vnanmaskh3, vnonsignh3));

    _mm_storeu_si128((__m128i*) o, vh0);
    _mm_storeu_si128((__m128i*) (o + 8), vh1);
    _mm_storeu_si128((__m128i*) (o + 16), vh2);
    _mm_storeu_si128((__m128i*) (o + 24), vh3);
    o += 32;
  }
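  // Process remaining full groups of 8 elements with the same algorithm.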
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

    _mm_storeu_si128((__m128i*) o, vh);
    o += 8;
  }
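  // Process the final 1..7 elements. The vector loads here may read past the end of the input buffer;
  // the kernel is declared XNN_OOB_READS to permit this.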
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const __m128 vx_hi = _mm_loadu_ps(input_hi);

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

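    // Store the 4-, 2-, and 1-element tails of the last converted vector.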
    if (n & (4 * sizeof(float))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) _mm_cvtsi128_si32(vh);
    }
  }
}