// Auto-generated file. Do not edit!
//   Template: src/f32-f16-vcvt/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__sse2_x16(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

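  // Vectorized float32->float16 conversion using only SSE2 bit manipulation (no F16C).
  // All constants are precomputed in params->sse2:
  //   - nonsign_mask clears the sign bit; exp_bias shifts the exponent field toward the
  //     half-precision range, and bias_min clamps it so subnormal outputs round correctly;
  //   - scale_to_inf and scale_to_zero rescale the magnitude so that a single float
  //     addition of the bias rounds the mantissa into half-precision position;
  //   - expw_max is the single-precision exponent field, used both to mask the bias and
  //     to detect NaN inputs;
  //   - manth_mask/exph_mask extract the half-precision mantissa and exponent fields from
  //     the rounded result, and nanh is the canonical half-precision NaN.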
  const __m128 vnonsign_mask = _mm_load_ps((const float*) params->sse2.nonsign_mask);
  const __m128i vexp_bias = _mm_load_si128((const __m128i*) params->sse2.exp_bias);
  const __m128 vscale_to_inf = _mm_load_ps(params->sse2.scale_to_inf);
  const __m128i vexpw_max = _mm_load_si128((const __m128i*) params->sse2.expw_max);
  const __m128 vscale_to_zero = _mm_load_ps(params->sse2.scale_to_zero);
  const __m128i vbias_min = _mm_load_si128((const __m128i*) params->sse2.bias_min);
  const __m128i vmanth_mask = _mm_load_si128((const __m128i*) params->sse2.manth_mask);
  const __m128i vexph_mask = _mm_load_si128((const __m128i*) params->sse2.exph_mask);
  const __m128i vnanh = _mm_load_si128((const __m128i*) params->sse2.nanh);

  uint16_t* o = (uint16_t*) output;
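  // Main loop: convert 16 elements per iteration, keeping four independent 4-wide
  // dependency chains interleaved.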
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m128 vx0 = _mm_loadu_ps(input);
    const __m128 vx1 = _mm_loadu_ps(input + 4);
    const __m128 vx2 = _mm_loadu_ps(input + 8);
    const __m128 vx3 = _mm_loadu_ps(input + 12);
    input += 16;

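    // Split each input vector into its magnitude (sign bit cleared) and its isolated
    // sign bit (recovered as x XOR |x|).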
    const __m128 vabsx0 = _mm_and_ps(vx0, vnonsign_mask);
    const __m128 vabsx1 = _mm_and_ps(vx1, vnonsign_mask);
    const __m128 vabsx2 = _mm_and_ps(vx2, vnonsign_mask);
    const __m128 vabsx3 = _mm_and_ps(vx3, vnonsign_mask);

    const __m128 vsignx0 = _mm_xor_ps(vx0, vabsx0);
    const __m128 vsignx1 = _mm_xor_ps(vx1, vabsx1);
    const __m128 vsignx2 = _mm_xor_ps(vx2, vabsx2);
    const __m128 vsignx3 = _mm_xor_ps(vx3, vabsx3);

    __m128i vbias0 = _mm_add_epi32(_mm_castps_si128(vabsx0), vexp_bias);
    __m128i vbias1 = _mm_add_epi32(_mm_castps_si128(vabsx1), vexp_bias);
    __m128i vbias2 = _mm_add_epi32(_mm_castps_si128(vabsx2), vexp_bias);
    __m128i vbias3 = _mm_add_epi32(_mm_castps_si128(vabsx3), vexp_bias);

    __m128 vf0 = _mm_mul_ps(vabsx0, vscale_to_inf);
    __m128 vf1 = _mm_mul_ps(vabsx1, vscale_to_inf);
    __m128 vf2 = _mm_mul_ps(vabsx2, vscale_to_inf);
    __m128 vf3 = _mm_mul_ps(vabsx3, vscale_to_inf);

    const __m128i vnanmaskw0 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx0), vexpw_max);
    const __m128i vnanmaskw1 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx1), vexpw_max);
    const __m128i vnanmaskw2 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx2), vexpw_max);
    const __m128i vnanmaskw3 = _mm_cmpgt_epi32(_mm_castps_si128(vabsx3), vexpw_max);

    vbias0 = _mm_and_si128(vbias0, vexpw_max);
    vbias1 = _mm_and_si128(vbias1, vexpw_max);
    vbias2 = _mm_and_si128(vbias2, vexpw_max);
    vbias3 = _mm_and_si128(vbias3, vexpw_max);

    vf0 = _mm_mul_ps(vf0, vscale_to_zero);
    vf1 = _mm_mul_ps(vf1, vscale_to_zero);
    vf2 = _mm_mul_ps(vf2, vscale_to_zero);
    vf3 = _mm_mul_ps(vf3, vscale_to_zero);

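    // Narrow the 32-bit NaN masks and sign words to 16-bit lanes; signed saturation in
    // _mm_packs_epi32 maps 0xFFFFFFFF/0x80000000 words to 0xFFFF/0x8000 halfwords, so
    // the mask and the sign bit position are preserved.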
    const __m128i vnanmaskh0 = _mm_packs_epi32(vnanmaskw0, vnanmaskw1);
    const __m128i vnanmaskh1 = _mm_packs_epi32(vnanmaskw2, vnanmaskw3);

    const __m128i vsignh0 = _mm_packs_epi32(_mm_castps_si128(vsignx0), _mm_castps_si128(vsignx1));
    const __m128i vsignh1 = _mm_packs_epi32(_mm_castps_si128(vsignx2), _mm_castps_si128(vsignx3));

    vbias0 = _mm_max_epi16(vbias0, vbias_min);
    vbias1 = _mm_max_epi16(vbias1, vbias_min);
    vbias2 = _mm_max_epi16(vbias2, vbias_min);
    vbias3 = _mm_max_epi16(vbias3, vbias_min);

    __m128i vh0 = _mm_and_si128(vnanh, vnanmaskh0);
    __m128i vh1 = _mm_and_si128(vnanh, vnanmaskh1);

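    // Adding the clamped, exponent-only bias as a float aligns the half-precision
    // mantissa within the single-precision result, so the FPU's round-to-nearest-even
    // performs the rounding, including in the subnormal range.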
    vf0 = _mm_add_ps(vf0, _mm_castsi128_ps(vbias0));
    vf1 = _mm_add_ps(vf1, _mm_castsi128_ps(vbias1));
    vf2 = _mm_add_ps(vf2, _mm_castsi128_ps(vbias2));
    vf3 = _mm_add_ps(vf3, _mm_castsi128_ps(vbias3));

    vh0 = _mm_or_si128(vh0, vsignh0);
    vh1 = _mm_or_si128(vh1, vsignh1);

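    // Extract the half-precision exponent (bits of vf shifted right by 13 and masked)
    // and mantissa fields, recombine them, then pack the 32-bit results down to 16-bit
    // half-precision payloads; NaN lanes keep the canonical NaN set above.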
    __m128i vexpw0 = _mm_srli_epi32(_mm_castps_si128(vf0), 13);
    __m128i vexpw1 = _mm_srli_epi32(_mm_castps_si128(vf1), 13);
    __m128i vexpw2 = _mm_srli_epi32(_mm_castps_si128(vf2), 13);
    __m128i vexpw3 = _mm_srli_epi32(_mm_castps_si128(vf3), 13);

    const __m128i vmantw0 = _mm_and_si128(_mm_castps_si128(vf0), vmanth_mask);
    const __m128i vmantw1 = _mm_and_si128(_mm_castps_si128(vf1), vmanth_mask);
    const __m128i vmantw2 = _mm_and_si128(_mm_castps_si128(vf2), vmanth_mask);
    const __m128i vmantw3 = _mm_and_si128(_mm_castps_si128(vf3), vmanth_mask);

    vexpw0 = _mm_and_si128(vexpw0, vexph_mask);
    vexpw1 = _mm_and_si128(vexpw1, vexph_mask);
    vexpw2 = _mm_and_si128(vexpw2, vexph_mask);
    vexpw3 = _mm_and_si128(vexpw3, vexph_mask);

    const __m128i vnonsignw0 = _mm_add_epi32(vmantw0, vexpw0);
    const __m128i vnonsignw1 = _mm_add_epi32(vmantw1, vexpw1);
    const __m128i vnonsignw2 = _mm_add_epi32(vmantw2, vexpw2);
    const __m128i vnonsignw3 = _mm_add_epi32(vmantw3, vexpw3);

    const __m128i vnonsignh0 = _mm_packs_epi32(vnonsignw0, vnonsignw1);
    const __m128i vnonsignh1 = _mm_packs_epi32(vnonsignw2, vnonsignw3);

    vh0 = _mm_or_si128(vh0, _mm_andnot_si128(vnanmaskh0, vnonsignh0));
    vh1 = _mm_or_si128(vh1, _mm_andnot_si128(vnanmaskh1, vnonsignh1));

    _mm_storeu_si128((__m128i*) o, vh0);
    _mm_storeu_si128((__m128i*) (o + 8), vh1);
    o += 16;
  }
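  // Remainder loop: convert 8 elements at a time with the same bit manipulation,
  // using a single pair of 4-wide vectors.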
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const __m128 vx_hi = _mm_loadu_ps(input + 4);
    input += 8;

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

    _mm_storeu_si128((__m128i*) o, vh);
    o += 8;
  }
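  // Final partial tile of 1-7 elements. Both 4-wide loads may read past the last valid
  // element (the second load aliases the first when fewer than 4 elements remain), which
  // is permitted because the kernel is declared XNN_OOB_READS; only valid results are
  // stored below.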
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128 vx_lo = _mm_loadu_ps(input);
    const float* input_hi = (const float*) ((uintptr_t) input + (n & (4 * sizeof(float))));
    const __m128 vx_hi = _mm_loadu_ps(input_hi);

    const __m128 vabsx_lo = _mm_and_ps(vx_lo, vnonsign_mask);
    const __m128 vabsx_hi = _mm_and_ps(vx_hi, vnonsign_mask);

    const __m128 vsignx_lo = _mm_xor_ps(vx_lo, vabsx_lo);
    const __m128 vsignx_hi = _mm_xor_ps(vx_hi, vabsx_hi);
    __m128i vbias_lo = _mm_add_epi32(_mm_castps_si128(vabsx_lo), vexp_bias);
    __m128i vbias_hi = _mm_add_epi32(_mm_castps_si128(vabsx_hi), vexp_bias);
    __m128 vf_lo = _mm_mul_ps(vabsx_lo, vscale_to_inf);
    __m128 vf_hi = _mm_mul_ps(vabsx_hi, vscale_to_inf);
    const __m128i vnanmaskw_lo = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_lo), vexpw_max);
    const __m128i vnanmaskw_hi = _mm_cmpgt_epi32(_mm_castps_si128(vabsx_hi), vexpw_max);

    vbias_lo = _mm_and_si128(vbias_lo, vexpw_max);
    vbias_hi = _mm_and_si128(vbias_hi, vexpw_max);
    vf_lo = _mm_mul_ps(vf_lo, vscale_to_zero);
    vf_hi = _mm_mul_ps(vf_hi, vscale_to_zero);
    const __m128i vnanmaskh = _mm_packs_epi32(vnanmaskw_lo, vnanmaskw_hi);
    const __m128i vsignh = _mm_packs_epi32(_mm_castps_si128(vsignx_lo), _mm_castps_si128(vsignx_hi));

    vbias_lo = _mm_max_epi16(vbias_lo, vbias_min);
    vbias_hi = _mm_max_epi16(vbias_hi, vbias_min);
    __m128i vh = _mm_and_si128(vnanh, vnanmaskh);

    vf_lo = _mm_add_ps(vf_lo, _mm_castsi128_ps(vbias_lo));
    vf_hi = _mm_add_ps(vf_hi, _mm_castsi128_ps(vbias_hi));
    vh = _mm_or_si128(vh, vsignh);

    __m128i vexpw_lo = _mm_srli_epi32(_mm_castps_si128(vf_lo), 13);
    __m128i vexpw_hi = _mm_srli_epi32(_mm_castps_si128(vf_hi), 13);
    const __m128i vmantw_lo = _mm_and_si128(_mm_castps_si128(vf_lo), vmanth_mask);
    const __m128i vmantw_hi = _mm_and_si128(_mm_castps_si128(vf_hi), vmanth_mask);

    vexpw_lo = _mm_and_si128(vexpw_lo, vexph_mask);
    vexpw_hi = _mm_and_si128(vexpw_hi, vexph_mask);

    const __m128i vnonsignw_lo = _mm_add_epi32(vmantw_lo, vexpw_lo);
    const __m128i vnonsignw_hi = _mm_add_epi32(vmantw_hi, vexpw_hi);

    const __m128i vnonsignh = _mm_packs_epi32(vnonsignw_lo, vnonsignw_hi);

    vh = _mm_or_si128(vh, _mm_andnot_si128(vnanmaskh, vnonsignh));

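    // Store the converted halfwords for the remaining 4, 2, and/or 1 elements, shifting
    // the already-stored lanes out of the register after each partial store.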
    if (n & (4 * sizeof(float))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (n & (2 * sizeof(float))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vh));
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (n & (1 * sizeof(float))) {
      *o = (uint16_t) _mm_cvtsi128_si32(vh);
    }
  }
}
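// Usage sketch (illustrative only, not part of the generated kernel). The batch size is
// passed in bytes and must be a multiple of sizeof(float); the params union is filled by
// an XNNPACK init helper (not shown here):
//
//   union xnn_f32_f16_cvt_params params;
//   // ... initialize params.sse2 fields (nonsign_mask, exp_bias, scale_to_inf,
//   // expw_max, scale_to_zero, bias_min, manth_mask, exph_mask, nanh) ...
//   xnn_f32_f16_vcvt_ukernel__sse2_x16(
//       batch_size * sizeof(float),  // number of bytes to convert
//       input,                       // const float* source
//       output,                      // void*, receives IEEE half-precision (uint16_t) values
//       &params);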