// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qs8_f32_vcvt_ukernel__sse2_x16(
    size_t n,
    const int8_t* x,
    float* y,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

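  // Load the conversion parameters. This kernel uses the "magic number" trick:
  // each byte is rebiased to unsigned, widened, and merged into the mantissa
  // of a float with a fixed exponent, so a single subtraction recovers the
  // integer value. The exact constants live in the params struct; typically
  // the sign mask holds 0x80 in every byte and the magic exponent/bias encode
  // a power-of-two exponent such as 2^23.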
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
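  // Main loop: convert 16 int8 elements to float per iteration.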
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) x);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (x + 8));
    x += 16;

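    // Flip the sign bit of each byte (the sign mask is expected to hold 0x80
    // in every lane), rebiasing signed int8 values into unsigned range.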
    vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
    vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);

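    // Zero-extend the biased bytes to 16 bits by interleaving with zero.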
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);

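    // Interleave with the magic exponent: each 32-bit lane gets the 16-bit
    // value in its low half and the exponent pattern in its high half, forming
    // a float whose mantissa holds the biased input value.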
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));

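    // Subtract the magic bias to recover the numeric value as a float; the
    // bias is expected to fold in both the magic exponent and the byte rebias
    // applied by the sign-mask XOR.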
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);

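    // Apply the quantization scale.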
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    y += 16;
  }
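  // Secondary loop: convert 8 elements at a time with the same sequence of
  // steps.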
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
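  // Tail: convert the remaining 1-7 elements.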
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 7 * sizeof(int8_t));

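    // Load a full 8 bytes even though fewer remain; the XNN_OOB_READS
    // annotation on this kernel documents that reading past the end of x is
    // acceptable here.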
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

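    // Store 4, 2, and 1 elements based on the low bits of the remaining count,
    // converting the upper half of the vector only when it is needed.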
    if (n & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}