// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

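// Convert signed 8-bit quantized values to 32-bit floats, 24 elements per
// main-loop iteration, using only SSE2 instructions. The kernel uses the
// classic "magic number" trick: each byte is rebiased to an unsigned value,
// spliced into the mantissa of a float with a fixed (magic) exponent, and
// the bias is then removed with a single floating-point subtraction before
// the dequantization scale is applied.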
void xnn_qs8_f32_vcvt_ukernel__sse2_x24(
    size_t n,
    const int8_t* x,
    float* y,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

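  // Load the conversion constants prepared by the parameter-initialization
  // code: a per-byte sign mask, the upper 16 bits of the magic float, the
  // matching magic bias (expected to also absorb the quantization zero
  // point), and the dequantization scale.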
  const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
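  // Main loop: convert 24 elements per iteration, as three groups of 8.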
  for (; n >= 24 * sizeof(int8_t); n -= 24 * sizeof(int8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) x);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (x + 8));
    __m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (x + 16));
    x += 24;

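    // Flip each byte's sign bit (vsign_mask presumably holds 0x80 per byte),
    // rebiasing signed values x into unsigned bytes x + 128.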
    vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
    vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);
    vxGHIJKLMN = _mm_xor_si128(vxGHIJKLMN, vsign_mask);

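    // Zero-extend the low 8 bytes of each register into 16-bit lanes.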
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
    vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);

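    // Interleave the 16-bit values with the magic exponent: each 32-bit lane
    // becomes (magic_exp << 16) | value, the bit pattern of a float equal to
    // the rebiased input plus a large power-of-two constant.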
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
    __m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));

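    // Subtract the magic bias to cancel the power-of-two constant and the
    // byte rebias, leaving the input minus the zero point as a float
    // (assuming the bias constant folds in the quantization zero point).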
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
    vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
    vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);

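    // Multiply by the dequantization scale.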
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);
    vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
    vyKLMN = _mm_mul_ps(vyKLMN, vscale);

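    // Store all 24 converted floats.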
    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    _mm_storeu_ps(y + 16, vyGHIJ);
    _mm_storeu_ps(y + 20, vyKLMN);
    y += 24;
  }
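  // Tail loop: process 8 elements at a time with the same conversion steps.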
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
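  // Final 1-7 elements: a full 8-byte load is still used, so the kernel may
  // read past the end of the input; the XNN_OOB_READS annotation on the
  // function declares exactly this behavior.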
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 7 * sizeof(int8_t));

    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

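    // Store the remaining 1-7 outputs, testing one bit of n at a time.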
    if (n & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}