// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qu8_f32_vcvt_ukernel__sse2_x32(
    size_t n,
    const uint8_t* x,
    float* y,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
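  // Main loop: convert 32 uint8 elements per iteration via the "magic number"
  // trick: each uint16 lane is paired with a magic exponent halfword so that,
  // reinterpreted as a float, the 32-bit lane encodes 2**23 + x. Subtracting
  // vmagic_bias (expected to hold 2**23 plus the quantization zero point)
  // leaves (x - zero_point), which is then scaled to the dequantized value.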
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) x);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (x + 8));
    __m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (x + 16));
    __m128i vxOPQRSTUV = _mm_loadl_epi64((const __m128i*) (x + 24));
    x += 32;

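    // Zero-extend the uint8 lanes to uint16 by interleaving with zero
    // (SSE2 has no _mm_cvtepu8_epi16; that intrinsic requires SSE4.1).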
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
    vxOPQRSTUV = _mm_unpacklo_epi8(vxOPQRSTUV, vzero);

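    // Interleave each uint16 lane with the magic exponent halfword; the
    // resulting 32-bit lanes, reinterpreted as floats, encode 2**23 + x.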
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyOPQR = _mm_castsi128_ps(_mm_unpacklo_epi16(vxOPQRSTUV, vmagic_exp));
    __m128 vySTUV = _mm_castsi128_ps(_mm_unpackhi_epi16(vxOPQRSTUV, vmagic_exp));

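    // Subtract the magic bias to recover (x - zero_point) as a float.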
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
    vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
    vyOPQR = _mm_sub_ps(vyOPQR, vmagic_bias);
    vySTUV = _mm_sub_ps(vySTUV, vmagic_bias);

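    // Multiply by the quantization scale.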
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);
    vyOPQR = _mm_mul_ps(vyOPQR, vscale);
    vySTUV = _mm_mul_ps(vySTUV, vscale);

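    // Store the 32 converted floats with unaligned stores.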
    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    _mm_storeu_ps(y + 16, vyGHIJ);
    _mm_storeu_ps(y + 20, vyKLMN);
    _mm_storeu_ps(y + 24, vyOPQR);
    _mm_storeu_ps(y + 28, vySTUV);
    y += 32;
  }
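  // Second loop: convert any remaining full groups of 8 elements,
  // using the same magic-number conversion as above.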
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
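  // Tail: 1-7 remaining elements. The 8-byte load may read past the end of
  // x; the XNN_OOB_READS annotation on this kernel marks that out-of-bounds
  // read as intentional and safe.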
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

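    // Store the tail with progressively narrower stores selected by the bits
    // of n: 4 floats, then 2 (shifting the upper pair down with
    // _mm_movehl_ps), then 1.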
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}