// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


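// Converts IEEE FP16 values to FP32 using only SSE2 integer operations on
// 16-bit lanes (no F16C instructions). Each half-word is split into sign and
// magnitude; normalized magnitudes are converted by shifting and rescaling
// the exponent, denormalized magnitudes by a magic-bias subtraction, and the
// two results are blended per element before the sign is reattached.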
void xnn_f16_f32_vcvt_ukernel__sse2_int16_x32(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

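  // The conversion constants are loaded from the parameter structure rather
  // than materialized inline; their values are set by the matching
  // parameter-initialization function, not in this file. Their roles:
  //   sign_mask     - per-element mask for the f16 sign bit (0x8000)
  //   exp_offset    - added to the upper half-word to rebias the exponent
  //   exp_scale     - f32 multiplier completing the exponent rebias
  //   magic_mask    - upper half-word of the magic bias used for denormals
  //   magic_bias    - f32 value subtracted to extract denormalized results
  //   denorm_cutoff - threshold separating normalized from denormalized inputs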
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

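  // Main loop: convert 32 elements (four vectors of 8 half-words) per
  // iteration.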
  const uint16_t* i = (const uint16_t*) input;
  for (; n >= 32 * sizeof(uint16_t); n -= 32 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
    const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
    i += 32;

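    // Extract the sign bit of each f16 element.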
    const __m128i vsign0 = _mm_and_si128(vh0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vh1, vsign_mask);
    const __m128i vsign2 = _mm_and_si128(vh2, vsign_mask);
    const __m128i vsign3 = _mm_and_si128(vh3, vsign_mask);

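    // Clear the sign: each vnonsign lane holds the 15-bit exponent+mantissa
    // magnitude.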
    const __m128i vnonsign0 = _mm_xor_si128(vh0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vh1, vsign1);
    const __m128i vnonsign2 = _mm_xor_si128(vh2, vsign2);
    const __m128i vnonsign3 = _mm_xor_si128(vh3, vsign3);

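    // Build the two 16-bit halves of each magnitude shifted left by 13 into
    // f32 position: the left shift supplies the low half-word, the 3-bit
    // right shift supplies the high half-word (bits 16..31 of w << 13), and
    // exp_offset rebiases the exponent field. The rebias is completed by the
    // exp_scale multiply below which, with the reference parameter values,
    // also maps f16 infinities and NaNs to f32 infinities and NaNs.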
    const __m128i vprenorm0 = _mm_slli_epi16(vnonsign0, 13);
    const __m128i vprenorm1 = _mm_add_epi16(_mm_srli_epi16(vnonsign0, 3), vexp_offset);
    const __m128i vprenorm2 = _mm_slli_epi16(vnonsign1, 13);
    const __m128i vprenorm3 = _mm_add_epi16(_mm_srli_epi16(vnonsign1, 3), vexp_offset);
    const __m128i vprenorm4 = _mm_slli_epi16(vnonsign2, 13);
    const __m128i vprenorm5 = _mm_add_epi16(_mm_srli_epi16(vnonsign2, 3), vexp_offset);
    const __m128i vprenorm6 = _mm_slli_epi16(vnonsign3, 13);
    const __m128i vprenorm7 = _mm_add_epi16(_mm_srli_epi16(vnonsign3, 3), vexp_offset);

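    // Normalized path: interleave the half-word pairs into f32 words and
    // multiply by exp_scale to finish rebiasing the exponent. Each vnorm
    // vector holds 4 converted elements.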
    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm0, vprenorm1)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm2, vprenorm3)), vexp_scale));
    const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm4, vprenorm5)), vexp_scale));
    const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm4, vprenorm5)), vexp_scale));
    const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm6, vprenorm7)), vexp_scale));
    const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm6, vprenorm7)), vexp_scale));

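    // Denormalized path: pairing each magnitude with magic_mask as the upper
    // half-word forms the f32 bit pattern of magic_bias plus the denormal
    // value (with the reference parameter values); subtracting magic_bias
    // recovers the value exactly.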
    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign0, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign1, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign2, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign3, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign3, vmagic_mask)), vmagic_bias));

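    // Classify each element with a 16-bit signed compare: lanes above
    // denorm_cutoff will take the normalized result, the rest (denormals,
    // zero, and borderline values that both paths compute identically) take
    // the magic-bias result.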
    const __m128i vmask0 = _mm_cmpgt_epi16(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi16(vnonsign1, vdenorm_cutoff);
    const __m128i vmask2 = _mm_cmpgt_epi16(vnonsign2, vdenorm_cutoff);
    const __m128i vmask3 = _mm_cmpgt_epi16(vnonsign3, vdenorm_cutoff);

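    // Widen each 16-bit mask to 32 bits by interleaving it with itself, pick
    // the normalized or denormalized result per element, and OR the sign back
    // in; interleaving zeros below the sign half-words places each sign bit
    // at bit 31 of its f32 word.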
    const __m128i vxmask0 = _mm_unpacklo_epi16(vmask0, vmask0);
    const __m128i vf0 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask0, vnorm0), _mm_andnot_si128(vxmask0, vdenorm0)));
    const __m128i vxmask1 = _mm_unpackhi_epi16(vmask0, vmask0);
    const __m128i vf1 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign0),
      _mm_or_si128(_mm_and_si128(vxmask1, vnorm1), _mm_andnot_si128(vxmask1, vdenorm1)));
    const __m128i vxmask2 = _mm_unpacklo_epi16(vmask1, vmask1);
    const __m128i vf2 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask2, vnorm2), _mm_andnot_si128(vxmask2, vdenorm2)));
    const __m128i vxmask3 = _mm_unpackhi_epi16(vmask1, vmask1);
    const __m128i vf3 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign1),
      _mm_or_si128(_mm_and_si128(vxmask3, vnorm3), _mm_andnot_si128(vxmask3, vdenorm3)));
    const __m128i vxmask4 = _mm_unpacklo_epi16(vmask2, vmask2);
    const __m128i vf4 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask4, vnorm4), _mm_andnot_si128(vxmask4, vdenorm4)));
    const __m128i vxmask5 = _mm_unpackhi_epi16(vmask2, vmask2);
    const __m128i vf5 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign2),
      _mm_or_si128(_mm_and_si128(vxmask5, vnorm5), _mm_andnot_si128(vxmask5, vdenorm5)));
    const __m128i vxmask6 = _mm_unpacklo_epi16(vmask3, vmask3);
    const __m128i vf6 = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign3),
      _mm_or_si128(_mm_and_si128(vxmask6, vnorm6), _mm_andnot_si128(vxmask6, vdenorm6)));
    const __m128i vxmask7 = _mm_unpackhi_epi16(vmask3, vmask3);
    const __m128i vf7 = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign3),
      _mm_or_si128(_mm_and_si128(vxmask7, vnorm7), _mm_andnot_si128(vxmask7, vdenorm7)));

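    // Write out the 32 converted elements.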
    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
    _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
    _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
    _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
    output += 32;
  }
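  // Vector loop: handle the remaining full 8-element vectors with the same
  // sequence of steps as the main loop.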
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
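  // Tail: 1 to 7 elements remain. A full 16-byte vector is loaded, reading up
  // to 14 bytes past the end of the input; the XNN_OOB_READS annotation on
  // this function documents that this over-read is intentional and must be
  // tolerated by the caller.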
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

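    // n counts the remaining bytes (2 per element); test it for 4-, 2-, and
    // 1-element groups, storing each group and shifting the surviving lanes
    // down before the next partial store.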
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (n & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}