// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__sse2_x24(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

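  // Load the conversion constants prepared at parameter-initialization time.
  // SSE2 has no native f32->f16 conversion instruction, so the conversion is
  // done with bit manipulation: masks to split sign/exponent/mantissa fields,
  // plus scale and bias values that implement the rounding in the float domain.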
  const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
  const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
  const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
  const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
  const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
  const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
  const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
  const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
  const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);

  uint16_t* o = (uint16_t*) output;
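  // Main loop: convert 24 floats (6 vectors of 4) per iteration.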
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const __m128 vx0 = _mm_loadu_ps(input);
    const __m128 vx1 = _mm_loadu_ps(input + 4);
    const __m128 vx2 = _mm_loadu_ps(input + 8);
    const __m128 vx3 = _mm_loadu_ps(input + 12);
    const __m128 vx4 = _mm_loadu_ps(input + 16);
    const __m128 vx5 = _mm_loadu_ps(input + 20);
    input += 24;

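    // Split each input into its absolute value and its sign bit
    // (x XOR |x| leaves only the sign bit set).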
    const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
    const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
    const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
    const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
    const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
    const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);

    const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
    const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
    const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
    const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
    const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
    const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);

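    // Start computing the per-element rounding bias from the bits of |x|;
    // the exponent field is isolated from it further below.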
    __m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
    __m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
    __m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
    __m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
    __m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
    __m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);

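    // Multiply by scale_to_inf: values too large to represent in fp16
    // overflow to +infinity here, so they later saturate to the fp16
    // infinity encoding.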
    __m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
    __m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
    __m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
    __m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
    __m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
    __m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);

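    // Build a NaN mask: the bits of |x| compare greater, as signed int32,
    // than the all-ones exponent field (the encoding of infinity) only for
    // NaN inputs.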
    const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
    const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
    const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
    const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
    const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
    const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);

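    // Keep only the exponent field of the bias.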
    vbias0 = _mm_and_si128(vbias0, vexpw_max);
    vbias1 = _mm_and_si128(vbias1, vexpw_max);
    vbias2 = _mm_and_si128(vbias2, vexpw_max);
    vbias3 = _mm_and_si128(vbias3, vexpw_max);
    vbias4 = _mm_and_si128(vbias4, vexpw_max);
    vbias5 = _mm_and_si128(vbias5, vexpw_max);

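    // Scale back toward zero: infinities produced above stay infinite, while
    // finite values land in a range where the bias addition below rounds them.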
    vf0 = _mm_mul_ps(vf0, vscale_to_zero);
    vf1 = _mm_mul_ps(vf1, vscale_to_zero);
    vf2 = _mm_mul_ps(vf2, vscale_to_zero);
    vf3 = _mm_mul_ps(vf3, vscale_to_zero);
    vf4 = _mm_mul_ps(vf4, vscale_to_zero);
    vf5 = _mm_mul_ps(vf5, vscale_to_zero);

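    // Narrow the 32-bit NaN masks and sign words to 16 bits per element;
    // signed saturation maps the sign word 0x80000000 to the fp16 sign
    // bit 0x8000.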
    const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
    const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
    const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);

    const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
    const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
    const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));

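    // Clamp the bias from below so results in the fp16 subnormal range still
    // get a correctly positioned rounding point.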
    vbias0 = _mm_max_epi16(vbias0, vbias_min);
    vbias1 = _mm_max_epi16(vbias1, vbias_min);
    vbias2 = _mm_max_epi16(vbias2, vbias_min);
    vbias3 = _mm_max_epi16(vbias3, vbias_min);
    vbias4 = _mm_max_epi16(vbias4, vbias_min);
    vbias5 = _mm_max_epi16(vbias5, vbias_min);

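    // Start assembling the outputs: NaN lanes get the canonical fp16 NaN.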
    __m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
    __m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);
    __m128i vh2 = _mm_and_si128(vnanh, vnanmaskh2);

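    // Add the bias in the floating-point domain; this rounds the mantissa to
    // nearest-even and leaves the fp16 exponent and mantissa bits at fixed
    // positions for extraction below.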
    vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
    vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
    vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
    vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
    vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
    vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));

    vh0 = _mm_or_si128(vh0, vsignh0);
    vh1 = _mm_or_si128(vh1, vsignh1);
    vh2 = _mm_or_si128(vh2, vsignh2);

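    // Shift right by 13 to move the exponent into the fp16 exponent position,
    // then mask out the exponent and mantissa fields of the biased sums.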
    __m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
    __m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
    __m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
    __m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
    __m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
    __m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);

    const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
    const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
    const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
    const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
    const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
    const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);

    vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
    vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
    vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
    vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
    vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
    vexpw5 = _mm_and_si128(vexpw5, vexph_mask);

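    // Combine mantissa and exponent; mantissa overflow from rounding
    // propagates into the exponent through the addition.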
    const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
    const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
    const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
    const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
    const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
    const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);

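    // Pack the 32-bit results down to 16 bits; in-range values are unaffected.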
    const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
    const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
    const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);

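    // Merge: NaN lanes keep the canonical NaN, all other lanes take the
    // computed sign and value bits.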
    vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
    vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));
    vh2 = _mm_or_si128(vh2, _mm_andnot_si128(vnanmaskh2, vnonsignh2));

    _mm_storeu_si128((__m128i*) o, vh0);
    _mm_storeu_si128((__m128i*) (o + 8), vh1);
    _mm_storeu_si128((__m128i*) (o + 16), vh2);
    o += 24;
  }
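  // Process remaining full groups of 8 floats with the same algorithm.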
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

    _mm_storeu_si128((__m128i*) o, vh);
    o += 8;
  }
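  // Tail: 1..7 elements remain. The vector loads below may read past the end
  // of the input buffer; the XNN_OOB_READS annotation on this function
  // documents that such out-of-bounds reads are permitted.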
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const __m128 vx_hi = _mm_loadu_ps(input_hi);

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

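    // Store 4, 2, and then 1 half-precision value, depending on which bits
    // of the remaining byte count are set.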
    if (n & (4 * sizeof(float))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) _mm_cvtsi128_si32(vh);
    }
  }
}