// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>

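// Converts n/sizeof(float) IEEE single-precision values to IEEE half-precision
// using SSE4.1, without relying on the F16C instruction set.
//
// Per-lane sketch of what the loops below compute (a rough scalar rendering of
// the intrinsics; bits()/as_float()/max16() are just notation here, and the
// named constants come from the params struct, so their exact values are not
// visible in this file):
//
//   absx = x & nonsign_mask;                       // clear the sign bit
//   sign = x ^ absx;                               // isolated sign bit
//   bias = (bits(absx) + exp_bias) & expw_max;     // biased exponent field
//   bias = max16(bias, bias_min);                  // clamp (16-bit lane-wise max)
//   f    = (absx * scale_to_inf) * scale_to_zero;  // overflow/underflow rounding
//   f    = f + as_float(bias);                     // rounded f16 lands in low bits
//   h    = ((bits(f) >> 13) & exph_mask) + (bits(f) & manth_mask);
//   if (bits(absx) > expw_max) h = nanh;           // NaN input -> canonical NaN
//   out  = h | (sign >> 16);                       // reattach the sign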
void xnn_f32_f16_vcvt_ukernel__sse41_x24(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

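  // Conversion constants prepared by the kernel setup code and loaded from the
  // params struct: sign mask, exponent bias, the two rescaling factors, field
  // masks for the half-precision exponent and mantissa, the bias clamp, and the
  // canonical NaN bit pattern.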
  const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
  const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
  const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
  const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
  const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
  const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
  const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
  const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
  const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);

  uint16_t* o = (uint16_t*) output;
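  // Main loop: convert 24 elements (six 128-bit input vectors) per iteration.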
  for (; n >= 24 * sizeof(float); n -= 24 * sizeof(float)) {
    const __m128 vx0 = _mm_loadu_ps(input);
    const __m128 vx1 = _mm_loadu_ps(input + 4);
    const __m128 vx2 = _mm_loadu_ps(input + 8);
    const __m128 vx3 = _mm_loadu_ps(input + 12);
    const __m128 vx4 = _mm_loadu_ps(input + 16);
    const __m128 vx5 = _mm_loadu_ps(input + 20);
    input += 24;

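    // Split each input into its magnitude |x| and its sign bit (recovered as x XOR |x|).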
    const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
    const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
    const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
    const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);
    const __m128 vabsx4 = _mm_and_ps(vx4, vnonsign_mask);
    const __m128 vabsx5 = _mm_and_ps(vx5, vnonsign_mask);

    const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
    const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
    const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
    const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);
    const __m128 vsignx4 = _mm_xor_ps(vx4, vabsx4);
    const __m128 vsignx5 = _mm_xor_ps(vx5, vabsx5);

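    // Add the exponent bias to the integer view of |x|, and start rescaling |x|
    // so that out-of-range magnitudes overflow to infinity.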
    __m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
    __m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
    __m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
    __m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);
    __m128i vbias4 = _mm_add_epi32(_mm_castps_si128(vabsx4), vexp_bias);
    __m128i vbias5 = _mm_add_epi32(_mm_castps_si128(vabsx5), vexp_bias);

    __m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
    __m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
    __m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
    __m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);
    __m128 vf4 = _mm_mul_ps(vabsx4, vscale_to_inf);
    __m128 vf5 = _mm_mul_ps(vabsx5, vscale_to_inf);

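    // Flag NaN inputs: their integer representation compares greater than the
    // maximal exponent field (all exponent bits set and a nonzero mantissa).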
    const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
    const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
    const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
    const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);
    const __m128i vnanmaskw4 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx4), vexpw_max);
    const __m128i vnanmaskw5 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx5), vexpw_max);

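    // Keep only the exponent field of the bias, and finish the rescale: the second
    // multiply brings in-range values back while preserving the rounding forced by
    // the first one.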
    vbias0 = _mm_and_si128(vbias0, vexpw_max);
    vbias1 = _mm_and_si128(vbias1, vexpw_max);
    vbias2 = _mm_and_si128(vbias2, vexpw_max);
    vbias3 = _mm_and_si128(vbias3, vexpw_max);
    vbias4 = _mm_and_si128(vbias4, vexpw_max);
    vbias5 = _mm_and_si128(vbias5, vexpw_max);

    vf0 = _mm_mul_ps(vf0, vscale_to_zero);
    vf1 = _mm_mul_ps(vf1, vscale_to_zero);
    vf2 = _mm_mul_ps(vf2, vscale_to_zero);
    vf3 = _mm_mul_ps(vf3, vscale_to_zero);
    vf4 = _mm_mul_ps(vf4, vscale_to_zero);
    vf5 = _mm_mul_ps(vf5, vscale_to_zero);

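    // Narrow the NaN masks and the sign words from 32-bit to 16-bit lanes; signed
    // saturation preserves both the all-ones masks and the isolated sign bit.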
    const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
    const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);
    const __m128i vnanmaskh2 = _mm_packs_epi32(vnanmaskw4, vnanmaskw5);

    const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
    const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));
    const __m128i vsignh2 = _mm_packs_epi32(_mm_castps_si128(vsignx4), _mm_castps_si128(vsignx5));

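    // Clamp the bias from below (this is what keeps subnormal half-precision
    // results correct), then add it to the rescaled value: after this addition the
    // rounded half-precision exponent and mantissa occupy the low bits of the float.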
    vbias0 = _mm_max_epi16(vbias0, vbias_min);
    vbias1 = _mm_max_epi16(vbias1, vbias_min);
    vbias2 = _mm_max_epi16(vbias2, vbias_min);
    vbias3 = _mm_max_epi16(vbias3, vbias_min);
    vbias4 = _mm_max_epi16(vbias4, vbias_min);
    vbias5 = _mm_max_epi16(vbias5, vbias_min);

    vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
    vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
    vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
    vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));
    vf4 = _mm_add_ps(vf4, _mm_castsi128_ps(vbias4));
    vf5 = _mm_add_ps(vf5, _mm_castsi128_ps(vbias5));

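    // Extract the candidate half-precision exponent (bits 13 and up) and mantissa
    // fields from the integer view of the biased sum.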
    __m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
    __m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
    __m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
    __m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);
    __m128i vexpw4 = _mm_srli_epi32(_mm_castps_si128(vf4), 13);
    __m128i vexpw5 = _mm_srli_epi32(_mm_castps_si128(vf5), 13);

    const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
    const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
    const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
    const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);
    const __m128i vmantw4 = _mm_and_si128(_mm_castps_si128(vf4), vmanth_mask);
    const __m128i vmantw5 = _mm_and_si128(_mm_castps_si128(vf5), vmanth_mask);

    vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
    vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
    vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
    vexpw3 = _mm_and_si128(vexpw3, vexph_mask);
    vexpw4 = _mm_and_si128(vexpw4, vexph_mask);
    vexpw5 = _mm_and_si128(vexpw5, vexph_mask);

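    // Combine mantissa and exponent, then pack the 32-bit results into 16-bit lanes.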
    const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
    const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
    const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
    const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);
    const __m128i vnonsignw4 = _mm_add_epi32(vmantw4, vexpw4);
    const __m128i vnonsignw5 = _mm_add_epi32(vmantw5, vexpw5);

    const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
    const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);
    const __m128i vnonsignh2 = _mm_packs_epi32(vnonsignw4, vnonsignw5);

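    // Substitute the canonical NaN for flagged lanes and reattach the sign bits.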
    const __m128i vabsh0 = _mm_blendv_epi8(vnonsignh0, vnanh, vnanmaskh0);
    const __m128i vabsh1 = _mm_blendv_epi8(vnonsignh1, vnanh, vnanmaskh1);
    const __m128i vabsh2 = _mm_blendv_epi8(vnonsignh2, vnanh, vnanmaskh2);

    const __m128i vh0 = _mm_or_si128(vabsh0, vsignh0);
    const __m128i vh1 = _mm_or_si128(vabsh1, vsignh1);
    const __m128i vh2 = _mm_or_si128(vabsh2, vsignh2);

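    // Store 24 half-precision values (three vectors of 8).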
    _mm_storeu_si128((__m128i*) o, vh0);
    _mm_storeu_si128((__m128i*) (o + 8), vh1);
    _mm_storeu_si128((__m128i*) (o + 16), vh2);
    o += 24;
  }
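  // Same conversion for remaining blocks of 8 elements (two input vectors).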
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);

    const __m128i vh = _mm_or_si128(vabsh, vsignh);

    _mm_storeu_si128((__m128i*) o, vh);
    o += 8;
  }
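  // Tail: 1-7 remaining elements. The vector loads may read past the last valid
  // element (the kernel is declared XNN_OOB_READS); only converted values that
  // correspond to real inputs are stored below.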
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const __m128 vx_hi = _mm_loadu_ps(input_hi);

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    const __m128i vabsh = _mm_blendv_epi8(vnonsignh, vnanh, vnanmaskh);

    __m128i vh = _mm_or_si128(vabsh, vsignh);

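    // Store 4, 2, and/or 1 halfword(s), depending on the remaining element count.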
    if (n & (4 * sizeof(float))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) _mm_extract_epi16(vh, 0);
    }
  }
}