// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qs8_f32_vcvt_ukernel__avx2_x32(
    size_t n,
    const int8_t* x,
    float* y,
    const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

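  // Load the conversion parameters: the zero point, negated and replicated
  // across all 32-bit lanes, and the replicated scale factor.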
  const __m256i vminus_zero_point = _mm256_load_si256((const __m256i*) params->avx.minus_zero_point);
  const __m256 vscale = _mm256_load_ps(params->avx.scale);
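  // Main loop: convert 32 int8 elements per iteration, sign-extending each
  // group of 8 bytes to 32-bit integers.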
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    __m256i vx01234567 = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) x));
    __m256i vx89ABCDEF = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (x + 8)));
    __m256i vxGHIJKLMN = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (x + 16)));
    __m256i vxOPQRSTUV = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) (x + 24)));
    x += 32;

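    // Subtract the zero point: vminus_zero_point holds -zero_point, so an
    // addition suffices.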
    vx01234567 = _mm256_add_epi32(vx01234567, vminus_zero_point);
    vx89ABCDEF = _mm256_add_epi32(vx89ABCDEF, vminus_zero_point);
    vxGHIJKLMN = _mm256_add_epi32(vxGHIJKLMN, vminus_zero_point);
    vxOPQRSTUV = _mm256_add_epi32(vxOPQRSTUV, vminus_zero_point);

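    // Convert the 32-bit integers to single-precision floats.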
    __m256 vy01234567 = _mm256_cvtepi32_ps(vx01234567);
    __m256 vy89ABCDEF = _mm256_cvtepi32_ps(vx89ABCDEF);
    __m256 vyGHIJKLMN = _mm256_cvtepi32_ps(vxGHIJKLMN);
    __m256 vyOPQRSTUV = _mm256_cvtepi32_ps(vxOPQRSTUV);

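    // Apply the quantization scale.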
    vy01234567 = _mm256_mul_ps(vy01234567, vscale);
    vy89ABCDEF = _mm256_mul_ps(vy89ABCDEF, vscale);
    vyGHIJKLMN = _mm256_mul_ps(vyGHIJKLMN, vscale);
    vyOPQRSTUV = _mm256_mul_ps(vyOPQRSTUV, vscale);

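    // Store all 32 converted floats with unaligned stores.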
    _mm256_storeu_ps(y, vy01234567);
    _mm256_storeu_ps(y + 8, vy89ABCDEF);
    _mm256_storeu_ps(y + 16, vyGHIJKLMN);
    _mm256_storeu_ps(y + 24, vyOPQRSTUV);
    y += 32;
  }
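  // Process remaining elements in groups of 8.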
  for (; n >= 8 * sizeof(int8_t); n -= 8 * sizeof(int8_t)) {
    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);
    x += 8;

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
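  // Tail: 1 to 7 elements remain. The 8-byte load may read past the end of x;
  // this is permitted by the XNN_OOB_READS annotation on this kernel.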
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 7 * sizeof(int8_t));

    __m256i vx = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i*) x));
    vx = _mm256_add_epi32(vx, vminus_zero_point);

    __m256 vy = _mm256_cvtepi32_ps(vx);
    vy = _mm256_mul_ps(vy, vscale);

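    // Store the converted values 4, 2, and 1 at a time, according to the low
    // bits of the remaining element count n.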
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(int8_t))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(int8_t))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}