// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_f32_vcvt_ukernel__sse2_x16(
    size_t n,
    const uint8_t* x,
    float* y,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

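  // Load the conversion constants prepared by the parameter-init routine:
  // the magic exponent and magic bias implement the integer-to-float
  // conversion (folding in the input zero point), and vscale holds the
  // dequantization scale broadcast to all lanes.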
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
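  // Main loop: convert 16 uint8 elements per iteration.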
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx01234567 = _mm_loadl_epi64((const __m128i*) x);
    __m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (x + 8));
    x += 16;

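    // Zero-extend the 8-bit inputs to 16 bits by interleaving with zero.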
    vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
    vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);

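    // Pair each 16-bit value with the magic exponent in the high half of a
    // 32-bit lane; bit-cast to float, each lane then equals a large constant
    // plus the integer input.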
    __m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
    __m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
    __m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
    __m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));

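    // Subtract the magic bias (the same constant plus the zero point),
    // leaving (x - zero_point) as a float in each lane.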
    vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
    vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
    vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
    vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);

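    // Multiply by the quantization scale and store 16 converted floats.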
    vy0123 = _mm_mul_ps(vy0123, vscale);
    vy4567 = _mm_mul_ps(vy4567, vscale);
    vy89AB = _mm_mul_ps(vy89AB, vscale);
    vyCDEF = _mm_mul_ps(vyCDEF, vscale);

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    _mm_storeu_ps(y + 8, vy89AB);
    _mm_storeu_ps(y + 12, vyCDEF);
    y += 16;
  }
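  // Process a remaining group of 8 elements, if any, with the same steps.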
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
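  // Tail: handle the remaining 1-7 elements. The 8-byte load may read past
  // the end of x; the XNN_OOB_READS annotation on the function marks such
  // out-of-bounds reads as intentional.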
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

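    // Store the converted values 4, 2, then 1 at a time, per the low bits of n.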
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      _mm_store_ss(y, vy);
    }
  }
}
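
// For reference only (not part of the generated kernel): a minimal scalar
// sketch of the same conversion. Here zero_point and scale are assumed names
// for the quantization parameters that the parameter-init code folds into
// magic_bias and vscale above; each output is
//   y[i] = (float) (x[i] - zero_point) * scale
static inline void qu8_f32_vcvt_reference(
    size_t n,           // number of elements
    const uint8_t* x,   // quantized inputs
    float* y,           // float outputs
    int32_t zero_point, // quantization zero point (assumed parameter)
    float scale)        // quantization scale (assumed parameter)
{
  for (size_t i = 0; i < n; i++) {
    y[i] = (float) ((int32_t) x[i] - zero_point) * scale;
  }
}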