// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int32.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse41_int32_x32(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

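  // Conversion constants (loaded from params):
  //  - sign_mask isolates the sign bit once the fp16 value sits in the upper
  //    half of a 32-bit lane.
  //  - exp_offset and exp_scale rebias the fp16 exponent into the fp32
  //    exponent range for normalized inputs.
  //  - magic_bias implements the integer "magic number" trick used to convert
  //    denormal fp16 values.
  //  - denorm_cutoff separates the normalized path from the denormal path.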
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int32.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int32.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int32.exp_scale);
  const __m128i vmagic_bias = _mm_load_si128((const __m128i*) params->sse_int32.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int32.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; n >= 32 * sizeof(uint16_t); n -= 32 * sizeof(uint16_t)) {
    const __m128i vh0 = _mm_loadu_si128((const __m128i*) i);
    const __m128i vh1 = _mm_loadu_si128((const __m128i*) (i + 8));
    const __m128i vh2 = _mm_loadu_si128((const __m128i*) (i + 16));
    const __m128i vh3 = _mm_loadu_si128((const __m128i*) (i + 24));
    i += 32;

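    // Interleave with zeroes so that each fp16 value occupies the upper 16 bits
    // of a 32-bit lane (i.e. its raw bits are shifted left by 16).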
    const __m128i vw0 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh0);
    const __m128i vw1 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh0);
    const __m128i vw2 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh1);
    const __m128i vw3 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh1);
    const __m128i vw4 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh2);
    const __m128i vw5 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh2);
    const __m128i vw6 = _mm_unpacklo_epi16(_mm_setzero_si128(), vh3);
    const __m128i vw7 = _mm_unpackhi_epi16(_mm_setzero_si128(), vh3);

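    // Extract the sign (now in bit 31 of each lane) and clear it from the input,
    // leaving only the fp16 exponent and mantissa bits.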
    const __m128i vsign0 = _mm_and_si128(vw0, vsign_mask);
    const __m128i vsign1 = _mm_and_si128(vw1, vsign_mask);
    const __m128i vsign2 = _mm_and_si128(vw2, vsign_mask);
    const __m128i vsign3 = _mm_and_si128(vw3, vsign_mask);
    const __m128i vsign4 = _mm_and_si128(vw4, vsign_mask);
    const __m128i vsign5 = _mm_and_si128(vw5, vsign_mask);
    const __m128i vsign6 = _mm_and_si128(vw6, vsign_mask);
    const __m128i vsign7 = _mm_and_si128(vw7, vsign_mask);

    const __m128i vnonsign0 = _mm_xor_si128(vw0, vsign0);
    const __m128i vnonsign1 = _mm_xor_si128(vw1, vsign1);
    const __m128i vnonsign2 = _mm_xor_si128(vw2, vsign2);
    const __m128i vnonsign3 = _mm_xor_si128(vw3, vsign3);
    const __m128i vnonsign4 = _mm_xor_si128(vw4, vsign4);
    const __m128i vnonsign5 = _mm_xor_si128(vw5, vsign5);
    const __m128i vnonsign6 = _mm_xor_si128(vw6, vsign6);
    const __m128i vnonsign7 = _mm_xor_si128(vw7, vsign7);

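    // Normalized path: shift the fp16 exponent/mantissa into the fp32 field
    // positions, add the exponent offset, and multiply by the exponent scale to
    // complete the rebias.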
    const __m128i vnorm0 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign0, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm1 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign1, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm2 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign2, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm3 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign3, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm4 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign4, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm5 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign5, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm6 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign6, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm7 = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign7, 3), vexp_offset)), vexp_scale));

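    // Denormal path: move the fp16 bits into the low halfword, OR in the magic
    // bias to form a float whose mantissa holds those bits as an integer, then
    // subtract the bias to obtain the correctly scaled value.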
    const __m128i vdenorm0 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign0, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm1 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign1, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm2 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign2, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm3 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign3, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm4 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign4, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm5 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign5, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm6 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign6, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm7 = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign7, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

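    // Select the normalized result where the sign-free input exceeds the
    // denormal cutoff, the denormal result otherwise, then restore the sign.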
    const __m128i vmask0 = _mm_cmpgt_epi32(vnonsign0, vdenorm_cutoff);
    const __m128i vmask1 = _mm_cmpgt_epi32(vnonsign1, vdenorm_cutoff);
    const __m128i vmask2 = _mm_cmpgt_epi32(vnonsign2, vdenorm_cutoff);
    const __m128i vmask3 = _mm_cmpgt_epi32(vnonsign3, vdenorm_cutoff);
    const __m128i vmask4 = _mm_cmpgt_epi32(vnonsign4, vdenorm_cutoff);
    const __m128i vmask5 = _mm_cmpgt_epi32(vnonsign5, vdenorm_cutoff);
    const __m128i vmask6 = _mm_cmpgt_epi32(vnonsign6, vdenorm_cutoff);
    const __m128i vmask7 = _mm_cmpgt_epi32(vnonsign7, vdenorm_cutoff);

    const __m128i vf0 = _mm_or_si128(vsign0, _mm_blendv_epi8(vdenorm0, vnorm0, vmask0));
    const __m128i vf1 = _mm_or_si128(vsign1, _mm_blendv_epi8(vdenorm1, vnorm1, vmask1));
    const __m128i vf2 = _mm_or_si128(vsign2, _mm_blendv_epi8(vdenorm2, vnorm2, vmask2));
    const __m128i vf3 = _mm_or_si128(vsign3, _mm_blendv_epi8(vdenorm3, vnorm3, vmask3));
    const __m128i vf4 = _mm_or_si128(vsign4, _mm_blendv_epi8(vdenorm4, vnorm4, vmask4));
    const __m128i vf5 = _mm_or_si128(vsign5, _mm_blendv_epi8(vdenorm5, vnorm5, vmask5));
    const __m128i vf6 = _mm_or_si128(vsign6, _mm_blendv_epi8(vdenorm6, vnorm6, vmask6));
    const __m128i vf7 = _mm_or_si128(vsign7, _mm_blendv_epi8(vdenorm7, vnorm7, vmask7));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf0));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf1));
    _mm_storeu_ps(output + 8, _mm_castsi128_ps(vf2));
    _mm_storeu_ps(output + 12, _mm_castsi128_ps(vf3));
    _mm_storeu_ps(output + 16, _mm_castsi128_ps(vf4));
    _mm_storeu_ps(output + 20, _mm_castsi128_ps(vf5));
    _mm_storeu_ps(output + 24, _mm_castsi128_ps(vf6));
    _mm_storeu_ps(output + 28, _mm_castsi128_ps(vf7));
    output += 32;
  }
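  // Process remaining full vectors of 8 fp16 values using the same algorithm.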
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    const __m128i vf_lo = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));

    const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
    const __m128i vf_hi = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
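  // Handle the remaining 1-7 values. A full 16-byte vector is loaded, which may
  // read past the end of the input; this is permitted by the XNN_OOB_READS
  // annotation. All 8 lanes are converted, but only the remaining elements are
  // stored.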
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vw_lo = _mm_unpacklo_epi16(_mm_setzero_si128(), vh);
    const __m128i vw_hi = _mm_unpackhi_epi16(_mm_setzero_si128(), vh);

    const __m128i vsign_lo = _mm_and_si128(vw_lo, vsign_mask);
    const __m128i vsign_hi = _mm_and_si128(vw_hi, vsign_mask);

    const __m128i vnonsign_lo = _mm_xor_si128(vw_lo, vsign_lo);
    const __m128i vnonsign_hi = _mm_xor_si128(vw_hi, vsign_hi);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_lo, 3), vexp_offset)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_add_epi32(_mm_srli_epi32(vnonsign_hi, 3), vexp_offset)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_lo, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_or_si128(_mm_srli_epi32(vnonsign_hi, 16), vmagic_bias)), _mm_castsi128_ps(vmagic_bias)));

    const __m128i vmask_lo = _mm_cmpgt_epi32(vnonsign_lo, vdenorm_cutoff);
    __m128i vf = _mm_or_si128(vsign_lo, _mm_blendv_epi8(vdenorm_lo, vnorm_lo, vmask_lo));

    if (n & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vmask_hi = _mm_cmpgt_epi32(vnonsign_hi, vdenorm_cutoff);
      vf = _mm_or_si128(vsign_hi, _mm_blendv_epi8(vdenorm_hi, vnorm_hi, vmask_hi));
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (n & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}