// Auto-generated file. Do not edit!
//   Template: src/qs8-f32-vcvt/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qu8_f32_vcvt_ukernel__avx2_x24(
    size_t n,
    const uint8_t* x,
    float* y,
    const union xnn_qu8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

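  /* The conversion computed below is y[i] = (float) ((int32_t) x[i] - zero_point) * scale.
     The negated zero point and the scale are pre-broadcast across all 8 lanes
     of the params struct, so each constant is fetched with a single aligned
     256-bit load. */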
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
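  /* Main loop: convert 24 elements per iteration as three groups of 8.
     Each 8-byte group is zero-extended uint8 -> int32, has the zero point
     subtracted by adding its negation, is converted to float, and is
     scaled. */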
  for (; n >= 24 * sizeof(uint8_t); n -= 24 * sizeof(uint8_t)) {
    __m256i vx01234567 = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) x));
    __m256i vx89ABCDEF = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (x + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) (x + 16)));
    x += 24;

    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);

    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);

    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);

    _mm256_storeu_ps(y, vy01234567);
    _mm256_storeu_ps(y + 8, vy89ABCDEF);
    _mm256_storeu_ps(y + 16, vyGHIJKLMN);
    y += 24;
  }
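  /* Remainder loop: convert a full group of 8 elements at a time. */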
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    x += 8;

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
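  /* Tail: 1-7 elements remain. The 8-byte load below may read past the end
     of x; the kernel is annotated XNN_OOB_READS to mark that overread as
     intentional. The 4/2/1-element store cascade then writes exactly the
     n floats that are valid. */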
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    __m256i vx = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
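/*
 * Illustrative scalar reference (a sketch, not part of the generated kernel):
 * given the same inputs, the AVX2 code above is equivalent to the loop below,
 * where `zero_point` and `scale` are the quantization parameters whose
 * negation and lane-broadcast are packed into params->avx.
 *
 *   for (size_t i = 0; i < n; i++) {
 *     y[i] = (float) ((int32_t) x[i] - zero_point) * scale;
 *   }
 */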