// Auto-generated file. Do not edit!
//   Template: src/f16-f32-vcvt/sse-int16.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f16_f32_vcvt_ukernel__sse2_int16_x8(
    size_t n,
    const void* input,
    float* output,
    const union xnn_f16_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

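  // Convert IEEE half-precision inputs to single precision using SSE2 integer
  // operations: normalized values are rebuilt by repositioning their
  // exponent/mantissa bits and rescaling, denormalized values via a
  // floating-point magic-bias trick, and the two results are blended with a
  // per-lane cutoff mask.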
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse_int16.sign_mask);
  const __m128i vexp_offset = _mm_load_si128((const __m128i*) params->sse_int16.exp_offset);
  const __m128 vexp_scale = _mm_load_ps(params->sse_int16.exp_scale);
  const __m128i vmagic_mask = _mm_load_si128((const __m128i*) params->sse_int16.magic_mask);
  const __m128 vmagic_bias = _mm_load_ps(params->sse_int16.magic_bias);
  const __m128i vdenorm_cutoff = _mm_load_si128((const __m128i*) params->sse_int16.denorm_cutoff);

  const uint16_t* i = (const uint16_t*) input;
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);
    i += 8;

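    // Split each half-precision value into its sign bit and the remaining
    // exponent/mantissa ("nonsign") bits.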
    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

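    // Assemble the two 16-bit halves of each normalized fp32 pattern: the low
    // half takes the mantissa bits shifted into position, the high half the
    // exponent/mantissa bits plus the exponent offset.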
    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

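    // Interleave the halves into 32-bit patterns, then multiply by the
    // exponent scale to rebase the exponent from the fp16 bias (15) to the
    // fp32 bias (127).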
    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

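    // Denormalized inputs: pair the mantissa bits with a magic exponent and
    // subtract the magic bias, which leaves the exact denormal value in fp32.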
    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

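    // Per-lane selector: all ones where the input is above the denormal
    // cutoff (normalized, infinite, or NaN), all zeroes otherwise.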
    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

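    // Widen the 16-bit mask to 32 bits, blend the normalized and denormalized
    // results, and OR the sign bit back into the high half of each element.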
    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    const __m128i vf_lo = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

    const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
    const __m128i vf_hi = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));

    _mm_storeu_ps(output, _mm_castsi128_ps(vf_lo));
    _mm_storeu_ps(output + 4, _mm_castsi128_ps(vf_hi));
    output += 8;
  }
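  // Tail: convert the final 1-7 elements with the same bit manipulation. The
  // full-width load may read past the end of the input buffer, which is why
  // the kernel is annotated XNN_OOB_READS.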
  if XNN_UNPREDICTABLE(n != 0) {
    const __m128i vh = _mm_loadu_si128((const __m128i*) i);

    const __m128i vsign = _mm_and_si128(vh, vsign_mask);

    const __m128i vnonsign = _mm_xor_si128(vh, vsign);

    const __m128i vprenorm_lo = _mm_slli_epi16(vnonsign, 13);
    const __m128i vprenorm_hi = _mm_add_epi16(_mm_srli_epi16(vnonsign, 3), vexp_offset);

    const __m128i vnorm_lo = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));
    const __m128i vnorm_hi = _mm_castps_si128(_mm_mul_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vprenorm_lo, vprenorm_hi)), vexp_scale));

    const __m128i vdenorm_lo = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpacklo_epi16(vnonsign, vmagic_mask)), vmagic_bias));
    const __m128i vdenorm_hi = _mm_castps_si128(_mm_sub_ps(_mm_castsi128_ps(_mm_unpackhi_epi16(vnonsign, vmagic_mask)), vmagic_bias));

    const __m128i vmask = _mm_cmpgt_epi16(vnonsign, vdenorm_cutoff);

    const __m128i vxmask_lo = _mm_unpacklo_epi16(vmask, vmask);
    __m128i vf = _mm_or_si128(_mm_unpacklo_epi16(_mm_setzero_si128(), vsign),
      _mm_or_si128(_mm_and_si128(vxmask_lo, vnorm_lo), _mm_andnot_si128(vxmask_lo, vdenorm_lo)));

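    // Store the remaining elements 4, 2, and 1 at a time, shifting the
    // converted results down after each partial store.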
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storeu_ps(output, _mm_castsi128_ps(vf));
      output += 4;

      const __m128i vxmask_hi = _mm_unpackhi_epi16(vmask, vmask);
      vf = _mm_or_si128(_mm_unpackhi_epi16(_mm_setzero_si128(), vsign),
        _mm_or_si128(_mm_and_si128(vxmask_hi, vnorm_hi), _mm_andnot_si128(vxmask_hi, vdenorm_hi)));
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storel_pi((__m64*) output, _mm_castsi128_ps(vf));
      output += 2;

      vf = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(vf), _mm_castsi128_ps(vf)));
    }
    if (n & (1 * sizeof(uint16_t))) {
      _mm_store_ss(output, _mm_castsi128_ps(vf));
    }
  }
}
