// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

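// Converts n QS8 (signed 8-bit quantized) elements x to F32 outputs y.
// Mathematically this computes y = (x - zero_point) * scale; the zero point
// is folded into the magic-bias constant by the params initialization, so the
// kernel itself only subtracts a bias and multiplies by the scale.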
void xnn_qs8_f32_vcvt_ukernel__sse2_x32(
    size_t n,
    const int8_t* x,
    float* y,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

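  // vsign_mask (bytes of 0x80) turns signed int8 into offset-binary (x + 128);
  // vmagic_exp provides the upper 16 bits of the float bit pattern of 2^23;
  // vmagic_bias combines 2^23, the +128 offset from the sign flip, and the
  // quantization zero point; vscale is the dequantization scale.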
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
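  // Main loop: convert 32 int8 elements per iteration.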
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) x);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (x + 8));
    __m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (x + 16));
    __m128i vxOPQRSTUV = _mm_loadl_epi64((const __m128i*) (x + 24));
    x += 32;

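    // XOR with the sign mask converts signed int8 to unsigned offset-binary.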
    vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
    vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);
    vxGHIJKLMN = _mm_xor_si128(vxGHIJKLMN, vsign_mask);
    vxOPQRSTUV = _mm_xor_si128(vxOPQRSTUV, vsign_mask);

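    // Zero-extend the 8 low bytes of each register to 16-bit lanes.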
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
    vxOPQRSTUV = _mm_unpacklo_epi8(vxOPQRSTUV, vzero);

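    // Interleave each 16-bit value u with the magic exponent so that every
    // 32-bit lane holds the bit pattern of the float 2^23 + u.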
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyOPQR = _mm_castsi128_ps(_mm_unpacklo_epi16(vxOPQRSTUV, vmagic_exp));
    __m128 vySTUV = _mm_castsi128_ps(_mm_unpackhi_epi16(vxOPQRSTUV, vmagic_exp));

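    // Subtracting the magic bias cancels the 2^23 term and the +128 offset,
    // leaving (x - zero_point) as an exact float in each lane.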
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
    vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
    vyOPQR = _mm_sub_ps(vyOPQR, vmagic_bias);
    vySTUV = _mm_sub_ps(vySTUV, vmagic_bias);

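    // Apply the dequantization scale.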
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    vyOPQR = _mm_mul_ps(vyOPQR, vscale);
    vySTUV = _mm_mul_ps(vySTUV, vscale);

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    _mm_storeu_ps(y + 16, vyGHIJ);
    _mm_storeu_ps(y + 20, vyKLMN);
    _mm_storeu_ps(y + 24, vyOPQR);
    _mm_storeu_ps(y + 28, vySTUV);
    y += 32;
  }
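  // Tail loop: convert 8 elements at a time using the same trick.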
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
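  // Final 1-7 elements: the 8-byte load may read past the end of x, which is
  // permitted by the XNN_OOB_READS annotation on this kernel.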
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 7 * sizeof(int8_t));

    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

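    // Store the last 1-7 outputs by decomposing n into 4-, 2-, and 1-element
    // pieces; the upper four conversions are computed only if needed.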
    if (n & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}